From eed80f8466921fd38f0f489fe62bf9487b1c6680 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?abdullah=20k=C3=BC=C3=A7=C3=BCk=C3=B6d=C3=BCk?= <68288042+akucukoduk16@users.noreply.github.com> Date: Tue, 12 Apr 2022 16:33:29 +0300 Subject: [PATCH 01/31] Abdullah/stubnetwork extended test (#20) * Continue to StubNetwork * TestUnicastOneToAll_Sequentially implemented * TestUnicastOneToAll_Concurrently Implemented * wip * implements test for two engines * TestUnicastOneToAll_Concurrently Implemented * adds concurrent test * adds lock * Some tests Implemented * TestUnicastOneToAll_Concurrently Implemented * TestUnicastOneToAll_Concurrently Implemented * deletion of prints * deletion of prints * deletion of prints * Continue to StubNetwork * TestUnicastOneToAll_Sequentially implemented * TestUnicastOneToAll_Concurrently Implemented * wip * implements test for two engines * TestUnicastOneToAll_Concurrently Implemented * adds concurrent test * adds lock * Some tests Implemented * TestUnicastOneToAll_Concurrently Implemented * deletion of prints * deletion of prints * deletion of prints * rebase * TestUnicastOneToAll_Sequentially implementations * TestUnicastOneToSome_Sequentially implementation * TestUnicastOneToSome_Sequentially implementation * TestUnicastOneToSome_Concurrently implementation * TestUnicastOneToAll_Sequentially_TwoEngines implementation * TestTwoStubNetworks_TwoEngines_Reply_ConcurrentMessages implementation * TestTwoStubNetworks_FourEngines_ConcurrentMessages implementation * TestRegisterToOccupiedChannel implementation * Javadoc modification * Import reorganize * Cl changes * Cl changes * Cl changes * applies revisions * StubNetworkTest is fixed * StubNetworkTest Identifier fixed. 
* applies revision * applies revisions * applies revisions * applies revisions * fixes lint * applies revision * fixes lint Co-authored-by: yhassanzadeh13 Co-authored-by: akucukoduk16 --- .../LightChainNetworkingException.java | 6 +- src/main/java/network/NetworkAdapter.java | 41 +++ src/test/java/networking/Hub.java | 51 +++- src/test/java/networking/MockConduit.java | 59 +++++ src/test/java/networking/MockEngine.java | 54 +++- src/test/java/networking/StubNetwork.java | 109 +++++++- .../networking/StubNetworkEpidemicTest.java | 236 +++++++++++++++++ src/test/java/networking/StubNetworkTest.java | 246 ++++++++++++++++-- 8 files changed, 777 insertions(+), 25 deletions(-) create mode 100644 src/main/java/network/NetworkAdapter.java create mode 100644 src/test/java/networking/MockConduit.java create mode 100644 src/test/java/networking/StubNetworkEpidemicTest.java diff --git a/src/main/java/model/exceptions/LightChainNetworkingException.java b/src/main/java/model/exceptions/LightChainNetworkingException.java index cff8c42a..bef1f521 100644 --- a/src/main/java/model/exceptions/LightChainNetworkingException.java +++ b/src/main/java/model/exceptions/LightChainNetworkingException.java @@ -3,4 +3,8 @@ /** * Represents a runtime exception happens on the Networking layer of LightChain. 
*/ -public class LightChainNetworkingException extends Exception{ } +public class LightChainNetworkingException extends Exception { + public LightChainNetworkingException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/src/main/java/network/NetworkAdapter.java b/src/main/java/network/NetworkAdapter.java new file mode 100644 index 00000000..e5151104 --- /dev/null +++ b/src/main/java/network/NetworkAdapter.java @@ -0,0 +1,41 @@ +package network; + +import model.Entity; +import model.exceptions.LightChainDistributedStorageException; +import model.exceptions.LightChainNetworkingException; +import model.lightchain.Identifier; + +/** + * NetworkAdapter models the interface that is exposed to the conduits from the networking layer. + */ +public interface NetworkAdapter { + /** + * Sends the Entity through the Network to the remote target. + * + * @param e the Entity to be sent over the network. + * @param target Identifier of the receiver. + * @param channel channel on which this entity is sent. + * @throws LightChainNetworkingException any unhappy path taken on sending the Entity. + */ + void unicast(Entity e, Identifier target, String channel) throws LightChainNetworkingException; + + /** + * Stores given Entity on the underlying Distributed Hash Table (DHT) of nodes. + * + * @param e the Entity to be stored over the network. + * @param namespace namespace on which this entity is stored. + * @throws LightChainDistributedStorageException any unhappy path taken on storing the Entity. + */ + void put(Entity e, String namespace) throws LightChainDistributedStorageException; + + /** + * Retrieves the entity corresponding to the given identifier form the underlying Distributed Hash Table + * (DHT) of nodes. + * + * @param identifier identifier of the entity to be retrieved. + * @param namespace the namespace on which this query is resolved. + * @return the retrieved entity or null if it does not exist. 
+ * @throws LightChainDistributedStorageException any unhappy path taken on retrieving the Entity. + */ + Entity get(Identifier identifier, String namespace) throws LightChainDistributedStorageException; +} diff --git a/src/test/java/networking/Hub.java b/src/test/java/networking/Hub.java index 01861dbd..77a83555 100644 --- a/src/test/java/networking/Hub.java +++ b/src/test/java/networking/Hub.java @@ -10,6 +10,51 @@ * Models the core communication part of the networking layer that allows stub network instances to talk to each other. */ public class Hub { - private ConcurrentHashMap networks; - private ConcurrentHashMap entities; -} + private final ConcurrentHashMap networks; + private final ConcurrentHashMap entities; + + /** + * Create a hub. + */ + public Hub() { + this.networks = new ConcurrentHashMap<>(); + this.entities = new ConcurrentHashMap<>(); + } + + /** + * Registeration of a network to the Hub. + * + * @param identifier identifier of network. + * @param network to be registered. + */ + public void registerNetwork(Identifier identifier, Network network) { + networks.put(identifier, network); + } + + /** + * Transfer entity from to another network on the same channel. + * + * @param entity entity to be transferred. + * @param target identifier of target. + * @param channel channel on which the entity is delivered to target. + */ + public void transferEntity(Entity entity, Identifier target, String channel) throws IllegalStateException { + StubNetwork net = this.getNetwork(target); + try { + net.receiveUnicast(entity, channel); + } catch (IllegalArgumentException ex) { + throw new IllegalStateException("target network failed on receiving unicast: " + ex.getMessage()); + } + + } + + /** + * Get the network with identifier. + * + * @param identifier identity of network. + * @return network corresponding to identifier. 
+ */ + private StubNetwork getNetwork(Identifier identifier) { + return (StubNetwork) networks.get(identifier); + } +} \ No newline at end of file diff --git a/src/test/java/networking/MockConduit.java b/src/test/java/networking/MockConduit.java new file mode 100644 index 00000000..7ff40438 --- /dev/null +++ b/src/test/java/networking/MockConduit.java @@ -0,0 +1,59 @@ +package networking; + +import model.Entity; +import model.exceptions.LightChainDistributedStorageException; +import model.exceptions.LightChainNetworkingException; +import model.lightchain.Identifier; +import network.Conduit; +import network.NetworkAdapter; + +/** + * MockConduit represents the Networking interface that is exposed to an Engine. + */ +public class MockConduit implements Conduit { + + private final String channel; + private final NetworkAdapter networkAdapter; + + public MockConduit(String channel, NetworkAdapter adapter) { + this.channel = channel; + this.networkAdapter = adapter; + } + + /** + * Sends the Entity through the Network to the remote target. + * + * @param e the Entity to be sent over the network. + * @param target Identifier of the receiver. + * @throws LightChainNetworkingException any unhappy path taken on sending the Entity. + */ + @Override + public void unicast(Entity e, Identifier target) throws LightChainNetworkingException { + this.networkAdapter.unicast(e, target, channel); + } + + /** + * Stores given Entity on the underlying Distributed Hash Table (DHT) of nodes. + * + * @param e the Entity to be stored over the network. + * @throws LightChainDistributedStorageException any unhappy path taken on storing the Entity. + */ + @Override + public void put(Entity e) throws LightChainDistributedStorageException { + + } + + /** + * Retrieves the entity corresponding to the given identifier form the underlying Distributed Hash Table + * (DHT) of nodes. + * + * @param identifier identifier of the entity to be retrieved. 
+ * @return the retrieved entity or null if it does not exist. + * @throws LightChainDistributedStorageException any unhappy path taken on retrieving the Entity. + */ + @Override + public Entity get(Identifier identifier) throws LightChainDistributedStorageException { + return null; + } + +} \ No newline at end of file diff --git a/src/test/java/networking/MockEngine.java b/src/test/java/networking/MockEngine.java index 70d82049..6b613b23 100644 --- a/src/test/java/networking/MockEngine.java +++ b/src/test/java/networking/MockEngine.java @@ -1,6 +1,8 @@ package networking; +import java.util.HashSet; import java.util.Set; +import java.util.concurrent.locks.ReentrantReadWriteLock; import model.Entity; import model.lightchain.Identifier; @@ -10,10 +12,56 @@ * Represents a mock implementation of Engine interface for testing. */ public class MockEngine implements Engine { - private Set receivedEntityIds; + private final ReentrantReadWriteLock lock; + private final Set receivedEntityIds; + + public MockEngine() { + this.receivedEntityIds = new HashSet<>(); + this.lock = new ReentrantReadWriteLock(); + } + + /** + * Called by Network whenever an Entity is arrived for this engine. + * + * @param e the arrived Entity from the network. + * @throws IllegalArgumentException any unhappy path taken on processing the Entity. + */ @Override public void process(Entity e) throws IllegalArgumentException { - // TODO: put e.Id() in the set. + lock.writeLock().lock(); + + receivedEntityIds.add(e.id()); + + lock.writeLock().unlock(); + } + + /** + * Check whether an entity is received. + * + * @param e the entity. + * @return true if the entity received, otherwise false. + */ + public boolean hasReceived(Entity e) { + lock.readLock().lock(); + + boolean ok = this.receivedEntityIds.contains(e.id()); + + lock.readLock().unlock(); + return ok; + } + + /** + * Total distinct entities this engine received. + * + * @return total messages it received. 
+ */ + public int totalReceived() { + lock.readLock().lock(); + + int size = receivedEntityIds.size(); + + lock.readLock().unlock(); + return size; } -} +} \ No newline at end of file diff --git a/src/test/java/networking/StubNetwork.java b/src/test/java/networking/StubNetwork.java index 3da34d7c..11b13e4e 100644 --- a/src/test/java/networking/StubNetwork.java +++ b/src/test/java/networking/StubNetwork.java @@ -2,24 +2,127 @@ import java.util.concurrent.ConcurrentHashMap; +import model.Entity; +import model.exceptions.LightChainDistributedStorageException; +import model.exceptions.LightChainNetworkingException; +import model.lightchain.Identifier; import network.Conduit; import network.Network; +import network.NetworkAdapter; import protocol.Engine; +import unittest.fixtures.IdentifierFixture; /** * A mock implementation of networking layer as a test util. */ -public class StubNetwork implements Network { +public class StubNetwork implements Network, NetworkAdapter { private final ConcurrentHashMap engines; private final Hub hub; + private final Identifier identifier; + /** + * Create stubNetwork. + * + * @param hub the hub which stubnetwork registered is. + */ public StubNetwork(Hub hub) { this.engines = new ConcurrentHashMap<>(); this.hub = hub; + this.identifier = IdentifierFixture.newIdentifier(); + this.hub.registerNetwork(identifier, this); } + /** + * Get the identifier of the stubnet. + * + * @return identifier. + */ + public Identifier id() { + return this.identifier; + } + + /** + * Forward the incoming entity to the engine whose channel is given. 
+ * + * @param entity received entity + * @param channel the channel through which the received entity is sent + */ + public void receiveUnicast(Entity entity, String channel) throws IllegalArgumentException { + Engine engine = getEngine(channel); + try { + engine.process(entity); + } catch (IllegalArgumentException e) { + throw new IllegalStateException("could not process the entity", e); + } + } + + /** + * Registers an Engine to the Network by providing it with a Conduit. + * + * @param en the Engine to be registered. + * @param channel the unique channel corresponding to the Engine. + * @return unique Conduit object created to connect the Network to the Engine. + * @throws IllegalStateException if the channel is already taken by another Engine. + */ + @Override + public Conduit register(Engine en, String channel) throws IllegalStateException { + Conduit conduit = new MockConduit(channel, this); + try { + if (engines.containsKey(channel)) { + throw new IllegalStateException(); + } + engines.put(channel, en); + } catch (IllegalArgumentException ex) { + throw new IllegalStateException("could not register the engine"); + } + return conduit; + } + + public Engine getEngine(String ch) { + return engines.get(ch); + } + + /** + * Sends the Entity through the Network to the remote target. + * + * @param e the Entity to be sent over the network. + * @param target Identifier of the receiver. + * @param channel channel on which this entity is sent. + * @throws LightChainNetworkingException any unhappy path taken on sending the Entity. + */ + @Override + public void unicast(Entity e, Identifier target, String channel) throws LightChainNetworkingException { + try { + this.hub.transferEntity(e, target, channel); + } catch (IllegalStateException ex) { + throw new LightChainNetworkingException("stub network could not transfer entity", ex); + } + + } + + /** + * Stores given Entity on the underlying Distributed Hash Table (DHT) of nodes. 
+ * + * @param e the Entity to be stored over the network. + * @param namespace namespace on which this entity is stored. + * @throws LightChainDistributedStorageException any unhappy path taken on storing the Entity. + */ + @Override + public void put(Entity e, String namespace) throws LightChainDistributedStorageException { + + } + + /** + * Retrieves the entity corresponding to the given identifier form the underlying Distributed Hash Table + * (DHT) of nodes. + * + * @param identifier identifier of the entity to be retrieved. + * @param namespace the namespace on which this query is resolved. + * @return the retrieved entity or null if it does not exist. + * @throws LightChainDistributedStorageException any unhappy path taken on retrieving the Entity. + */ @Override - public Conduit register(Engine e, String channel) throws IllegalStateException { + public Entity get(Identifier identifier, String namespace) throws LightChainDistributedStorageException { return null; } -} +} \ No newline at end of file diff --git a/src/test/java/networking/StubNetworkEpidemicTest.java b/src/test/java/networking/StubNetworkEpidemicTest.java new file mode 100644 index 00000000..9bd0766a --- /dev/null +++ b/src/test/java/networking/StubNetworkEpidemicTest.java @@ -0,0 +1,236 @@ +package networking; + +import java.util.ArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import model.Entity; +import model.exceptions.LightChainNetworkingException; +import network.Conduit; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import unittest.fixtures.EntityFixture; + +/** + * Encapsulates one-to-all test cases of stub network. 
+ */ +public class StubNetworkEpidemicTest { + private final String channel1 = "test-network-channel-1"; + private final String channel2 = "test-network-channel-2"; + private ArrayList networks; + private Hub hub; + + + /** + * Creates a hub with 10 connected networks, each network has two mock engines on different channels. + */ + @BeforeEach + void setup() { + this.networks = new ArrayList<>(); + this.hub = new Hub(); + for (int i = 0; i < 9; i++) { + StubNetwork stubNetwork = new StubNetwork(hub); + stubNetwork.register(new MockEngine(), channel1); + stubNetwork.register(new MockEngine(), channel2); + networks.add(stubNetwork); + } + } + + /** + * Test for Unicast one engine to all other stub networks. + */ + @Test + void testUnicastOneToAllSequentially() { + StubNetwork network1 = new StubNetwork(this.hub); + MockEngine a1 = new MockEngine(); + Conduit c1 = network1.register(a1, channel1); + Entity entity = new EntityFixture(); + + for (int i = 0; i < networks.size(); i++) { + try { + c1.unicast(entity, networks.get(i).id()); + MockEngine e1 = (MockEngine) networks.get(i).getEngine(channel1); + MockEngine e2 = (MockEngine) networks.get(i).getEngine(channel2); + + // only engine on channel-1 should receive the entity. + Assertions.assertTrue(e1.hasReceived(entity)); + Assertions.assertFalse(e2.hasReceived(entity)); + } catch (LightChainNetworkingException e) { + Assertions.fail(); + } + } + } + + /** + * Test one engine unicasts to all others concurrently. 
+ */ + @Test + void testUnicastOneToAllConcurrently() { + int concurrencyDegree = 9; + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch sendDone = new CountDownLatch(concurrencyDegree); + + StubNetwork network1 = new StubNetwork(hub); + MockEngine a1 = new MockEngine(); + Conduit c1 = network1.register(a1, channel1); + + Entity entity = new EntityFixture(); + Thread[] unicastThreads = new Thread[concurrencyDegree]; + + for (int i = 0; i < networks.size(); i++) { + int finalI = i; + unicastThreads[i] = new Thread(() -> { + try { + c1.unicast(entity, (this.networks.get(finalI).id())); + MockEngine e1 = (MockEngine) this.networks.get(finalI).getEngine(channel1); + MockEngine e2 = (MockEngine) this.networks.get(finalI).getEngine(channel2); + if (!e1.hasReceived(entity)) { + threadError.getAndIncrement(); + } + if (e2.hasReceived(entity)) { + threadError.getAndIncrement(); + } + sendDone.countDown(); + } catch (LightChainNetworkingException e) { + threadError.getAndIncrement(); + } + }); + } + + for (Thread t : unicastThreads) { + t.start(); + } + try { + boolean doneOneTime = sendDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + + Assertions.assertEquals(0, threadError.get()); + } + + /** + * Test one engine sends unicast to some sequentially. + */ + @Test + void testUnicastOneToSomeSequentially() { + StubNetwork network1 = new StubNetwork(hub); + MockEngine a1 = new MockEngine(); + Conduit c1 = network1.register(a1, channel1); + + Entity entity = new EntityFixture(); + + // unicast only to the first half + for (int i = 0; i < networks.size() / 2; i++) { + try { + c1.unicast(entity, this.networks.get(i).id()); + } catch (LightChainNetworkingException e) { + Assertions.fail(); + } + } + + // checks only first half of network should receive it. 
+ for (int i = 0; i < networks.size(); i++) { + // first half of networks should receive unicast + MockEngine e1 = (MockEngine) this.networks.get(i).getEngine(channel1); + MockEngine e2 = (MockEngine) this.networks.get(i).getEngine(channel2); + if (i < networks.size() / 2) { + + Assertions.assertTrue(e1.hasReceived(entity) // only engine on channel-1 should receive it. + && !e2.hasReceived(entity)); + } else { + Assertions.assertFalse(e1.hasReceived(entity) || e2.hasReceived(entity)); + } + } + + } + + /** + * Test one engine send unicast to some concurrently. + */ + @Test + void testUnicastOneToSomeConcurrently() { + int concurrencyDegree = networks.size() / 2; + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch sentDone = new CountDownLatch(concurrencyDegree); + StubNetwork network1 = new StubNetwork(hub); + + MockEngine a1 = new MockEngine(); + Conduit c1 = network1.register(a1, channel1); + Entity entity = new EntityFixture(); + Thread[] unicastThreads = new Thread[concurrencyDegree]; + + // concurrently unicasts to the first half of network + for (int i = 0; i < concurrencyDegree; i++) { + int finalI = i; + unicastThreads[i] = new Thread(() -> { + try { + c1.unicast(entity, this.networks.get(finalI).id()); + sentDone.countDown(); + } catch (LightChainNetworkingException e) { + threadError.getAndIncrement(); + } + }); + } + + for (Thread t : unicastThreads) { + t.start(); + } + try { + boolean doneOneTime = sentDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + + Assertions.assertEquals(0, threadError.get()); + + // checks only first half of network should receive it. 
+ for (int i = 0; i < networks.size(); i++) { + // first half of networks should receive unicast + MockEngine e1 = (MockEngine) this.networks.get(i).getEngine(channel1); + MockEngine e2 = (MockEngine) this.networks.get(i).getEngine(channel2); + if (i < networks.size() / 2) { + + Assertions.assertTrue(e1.hasReceived(entity) // only engine on channel-1 should receive it. + && !e2.hasReceived(entity)); + } else { + Assertions.assertFalse(e1.hasReceived(entity) || e2.hasReceived(entity)); + } + } + + } + + /** + * Test two engines sends different distinct entities over distinct channels other engines sequentially. + */ + @Test + void testUnicastOneToAll_SequentiallyTwoEngines() { + StubNetwork network1 = new StubNetwork(hub); + MockEngine a1 = new MockEngine(); + MockEngine a2 = new MockEngine(); + + Conduit c1 = network1.register(a1, channel1); + Conduit c2 = network1.register(a2, channel2); + + Entity entity1 = new EntityFixture(); + Entity entity2 = new EntityFixture(); + + for (StubNetwork network : networks) { + try { + c1.unicast(entity1, network.id()); + c2.unicast(entity2, network.id()); + MockEngine e1 = (MockEngine) network.getEngine(channel1); + MockEngine e2 = (MockEngine) network.getEngine(channel2); + Assertions.assertTrue(e1.hasReceived(entity1) && e2.hasReceived(entity2)); + Assertions.assertFalse(e2.hasReceived(entity1) || e1.hasReceived(entity2)); + + } catch (LightChainNetworkingException e) { + Assertions.fail(); + } + } + } +} diff --git a/src/test/java/networking/StubNetworkTest.java b/src/test/java/networking/StubNetworkTest.java index 2cefc2d3..95ce2e86 100644 --- a/src/test/java/networking/StubNetworkTest.java +++ b/src/test/java/networking/StubNetworkTest.java @@ -1,20 +1,236 @@ package networking; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import model.Entity; +import model.exceptions.LightChainNetworkingException; +import network.Conduit; +import 
org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import unittest.fixtures.EntityFixture; + /** - * Encapsulates tests for stub network implementation. + * Encapsulates tests for the stubnetwork. */ public class StubNetworkTest { - // TODO: add a test for each of the following scenarios: - // Use mock engines. - // 1. Engine A (on one stub network) can send message to Engine B (on another stub network) through its StubNetwork, - // and the message is received by Engine B. - // 2. Engine A can CONCURRENTLY send 100 messages to Engine B through its StubNetwork, - // and ALL messages received by Engine B. - // 3. Extend case 2 with Engine B also sending a reply message to Engine A for each received messages and all replies - // are received by Engine A. - // 4. Engines A and B on one StubNetwork can CONCURRENTLY send 100 messages to Engines C and D on another StubNetwork - // (A -> C) and (B -> D), and each Engine only - // receives messages destinated for it (C receives all messages from A) and (D receives all messages from B). - // Note that A and C must be on the same channel, and B and B must be on another same channel. - // 5. Stub network throws an exception if an engine is registering itself on an already taken channel. -} + + private final String channel1 = "test-network-channel-1"; + private final String channel2 = "test-network-channel-2"; + + /** + * Engine A (on one stub network) can send message to Engine B (on another stub network) through its StubNetwork, + * and the message is received by Engine B. 
+ */ + @Test + void testTwoStubNetworksTwoEngines() { + Hub hub = new Hub(); + StubNetwork networkA = new StubNetwork(hub); + MockEngine engineA = new MockEngine(); + Conduit conduitA = networkA.register(engineA, channel1); + + StubNetwork networkB = new StubNetwork(hub); + MockEngine engineB = new MockEngine(); + networkB.register(engineB, channel1); + + Entity entity = new EntityFixture(); + try { + conduitA.unicast(entity, networkB.id()); + } catch (LightChainNetworkingException e) { + Assertions.fail(); + } + Assertions.assertTrue(engineB.hasReceived(entity)); + } + + /** + * Engine A can CONCURRENTLY send 100 messages to Engine B through its StubNetwork, + * and ALL messages received by Engine B. + */ + @Test + void testTwoStubNetworksTwoEnginesConcurrentMessages() { + Hub hub = new Hub(); + + int concurrencyDegree = 100; + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch countDownLatch = new CountDownLatch(concurrencyDegree); + Thread[] unicastThreads = new Thread[concurrencyDegree]; + + StubNetwork networkA = new StubNetwork(hub); + MockEngine engineA = new MockEngine(); + Conduit conduitA = networkA.register(engineA, channel1); + + StubNetwork networkB = new StubNetwork(hub); + MockEngine engineB = new MockEngine(); + networkB.register(engineB, channel1); + + for (int i = 0; i < concurrencyDegree; i++) { + unicastThreads[i] = new Thread(() -> { + Entity entity = new EntityFixture(); + try { + conduitA.unicast(entity, networkB.id()); + if (!engineB.hasReceived(entity)) { + threadError.getAndIncrement(); + } + countDownLatch.countDown(); + } catch (LightChainNetworkingException e) { + threadError.getAndIncrement(); + } + }); + } + + for (Thread t : unicastThreads) { + t.start(); + } + + try { + boolean doneOneTime = countDownLatch.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + Assertions.assertEquals(0, threadError.get()); + 
Assertions.assertEquals(concurrencyDegree, engineB.totalReceived()); + } + + /** + * Engine A can CONCURRENTLY send 100 messages to Engine B through its StubNetwork, + * and ALL messages received by Engine B. + * Engine B also sending a reply message to Engine A for each received messages and all replies + * are received by Engine A. + */ + @Test + void testTwoStubNetworksTwoEnginesReplyConcurrentMessages() { + Hub hub = new Hub(); + + int concurrencyDegree = 100; + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch sendDone = new CountDownLatch(concurrencyDegree); + Thread[] unicastThreads = new Thread[concurrencyDegree]; + + StubNetwork networkA = new StubNetwork(hub); + MockEngine engineA = new MockEngine(); + Conduit conduitA = networkA.register(engineA, channel1); + + StubNetwork networkB = new StubNetwork(hub); + MockEngine engineB = new MockEngine(); + Conduit conduitB = networkB.register(engineB, channel1); + + for (int i = 0; i < concurrencyDegree; i++) { + unicastThreads[i] = new Thread(() -> { + Entity message = new EntityFixture(); + Entity reply = new EntityFixture(); + try { + // A -> B + conduitA.unicast(message, networkB.id()); + if (!engineB.hasReceived(message)) { + threadError.getAndIncrement(); + } + + // B -> A + conduitB.unicast(reply, networkA.id()); + if (!engineA.hasReceived(reply)) { + threadError.getAndIncrement(); + } + sendDone.countDown(); + } catch (LightChainNetworkingException e) { + threadError.getAndIncrement(); + } + }); + } + + for (Thread t : unicastThreads) { + t.start(); + } + + try { + boolean doneOneTime = sendDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + Assertions.assertEquals(0, threadError.get()); + } + + /** + * Engines A1 and A2 on one StubNetwork can CONCURRENTLY send 100 messages to Engines B1 and B2 on another StubNetwork + * (A1 -> B1) and (A2 -> B2), and each Engine only + * receives messages destinated for it 
(B1 receives all messages from A1) and (B2 receives all messages from A2). + */ + @Test + void testTwoStubNetworksFourEnginesConcurrentMessages() { + Hub hub = new Hub(); + + int concurrencyDegree = 100; + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch sendDone = new CountDownLatch(concurrencyDegree); + Thread[] unicastThreads = new Thread[concurrencyDegree]; + + // network A + StubNetwork networkA = new StubNetwork(hub); + MockEngine engineA1 = new MockEngine(); + Conduit conduitA1 = networkA.register(engineA1, channel1); + + MockEngine engineA2 = new MockEngine(); + Conduit conduitA2 = networkA.register(engineA2, channel2); + + // network B + StubNetwork networkB = new StubNetwork(hub); + MockEngine engineB1 = new MockEngine(); + MockEngine engineB2 = new MockEngine(); + networkB.register(engineB1, channel1); + networkB.register(engineB2, channel2); + + for (int i = 0; i < concurrencyDegree; i++) { + unicastThreads[i] = new Thread(() -> { + Entity messageA1toB1 = new EntityFixture(); + Entity messageA2toB2 = new EntityFixture(); + try { + // A1 -> B1 + // A2 -> B2 + conduitA1.unicast(messageA1toB1, networkB.id()); + conduitA2.unicast(messageA2toB2, networkB.id()); + + if (!engineB1.hasReceived(messageA1toB1) + || engineB1.hasReceived(messageA2toB2) + || !engineB2.hasReceived(messageA2toB2) + || engineB2.hasReceived(messageA1toB1)) { + threadError.getAndIncrement(); + } + sendDone.countDown(); + } catch (LightChainNetworkingException e) { + threadError.getAndIncrement(); + } + }); + } + for (Thread t : unicastThreads) { + t.start(); + } + try { + boolean doneOneTime = sendDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + Assertions.assertEquals(0, threadError.get()); + } + + /** + * Stub network throws an exception if an engine is registering itself on an already taken channel. 
+ */ + @Test + void testRegisterToOccupiedChannel() { + Hub hub = new Hub(); + + StubNetwork network1 = new StubNetwork(hub); + MockEngine a1 = new MockEngine(); + network1.register(a1, channel1); + MockEngine b1 = new MockEngine(); + try { + network1.register(b1, channel1); + Assertions.fail("fail! method was expected to throw an exception"); + } catch (IllegalStateException ignored) { + // ignored + } + } +} \ No newline at end of file From b3f6f3e862f550eb29aaebf7e38e5376e238176b Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 14 Apr 2022 11:24:54 +0300 Subject: [PATCH 02/31] TansactionsMapDb and tests are implemented. --- .../java/model/lightchain/Identifier.java | 3 +- .../java/model/lightchain/Identifiers.java | 3 +- .../java/model/lightchain/Transaction.java | 35 +- .../java/storage/mapdb/TransactionsMapDb.java | 124 +++++ src/test/java/storage/TransactionsTest.java | 525 ++++++++++++++++++ 5 files changed, 687 insertions(+), 3 deletions(-) create mode 100644 src/main/java/storage/mapdb/TransactionsMapDb.java diff --git a/src/main/java/model/lightchain/Identifier.java b/src/main/java/model/lightchain/Identifier.java index 26c546ff..7306ffbf 100644 --- a/src/main/java/model/lightchain/Identifier.java +++ b/src/main/java/model/lightchain/Identifier.java @@ -1,5 +1,6 @@ package model.lightchain; +import java.io.Serializable; import java.util.Arrays; import io.ipfs.multibase.Multibase; @@ -7,7 +8,7 @@ /** * Represents a 32-byte unique identifier for an entity. Normally is computed as the hash value of the entity. 
*/ -public class Identifier { +public class Identifier implements Serializable { public static final int Size = 32; private final byte[] value; diff --git a/src/main/java/model/lightchain/Identifiers.java b/src/main/java/model/lightchain/Identifiers.java index 10e858d4..93e060c3 100644 --- a/src/main/java/model/lightchain/Identifiers.java +++ b/src/main/java/model/lightchain/Identifiers.java @@ -1,11 +1,12 @@ package model.lightchain; +import java.io.Serializable; import java.util.ArrayList; /** * Represents an aggregated type for identifiers. */ -public class Identifiers { +public class Identifiers implements Serializable { private final ArrayList identifiers; public Identifiers() { diff --git a/src/main/java/model/lightchain/Transaction.java b/src/main/java/model/lightchain/Transaction.java index ef0aab57..afae00ec 100644 --- a/src/main/java/model/lightchain/Transaction.java +++ b/src/main/java/model/lightchain/Transaction.java @@ -1,12 +1,16 @@ package model.lightchain; +import java.io.Serializable; + import model.codec.EntityType; import model.crypto.Signature; + + /** * Represents a LightChain transaction in form of a token transfer between a sender and receiver. */ -public class Transaction extends model.Entity { +public class Transaction extends model.Entity implements Serializable { /** * The identifier of a finalized block that this transaction refers to its snapshot. */ @@ -51,6 +55,35 @@ public Transaction(Identifier referenceBlockId, Identifier sender, Identifier re this.amount = amount; } + /** + * Return the HashCode. + * + * @return the hashcode. + */ + @Override + public int hashCode() { + return this.id().hashCode(); + } + + /** + * Returns true if objects are equal. + * + * @param o an transaction object. + * @return true if objects equal. 
+ */ + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Transaction)) { + return false; + } + Transaction that = (Transaction) o; + + return this.id().equals(that.id()); + } + /** * Type of this entity. * diff --git a/src/main/java/storage/mapdb/TransactionsMapDb.java b/src/main/java/storage/mapdb/TransactionsMapDb.java new file mode 100644 index 00000000..5e4edbf2 --- /dev/null +++ b/src/main/java/storage/mapdb/TransactionsMapDb.java @@ -0,0 +1,124 @@ +package storage.mapdb; + +import java.util.ArrayList; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import model.lightchain.Identifier; +import model.lightchain.Transaction; +import org.mapdb.*; +import storage.Transactions; + +/** + * Implementation of Transactions interface. + */ +public class TransactionsMapDb implements Transactions { + private final DB db; + private final ReentrantReadWriteLock lock; + private static final String MAP_NAME = "transactions_map"; + private final HTreeMap transactionsMap; + + /** + * Creates TransactionsMapDb. + * + * @param filePath the path of the file. + */ + public TransactionsMapDb(String filePath) { + this.db = DBMaker.fileDB(filePath).make(); + this.lock = new ReentrantReadWriteLock(); + transactionsMap = this.db.hashMap(MAP_NAME) + .keySerializer(Serializer.BYTE_ARRAY) + .createOrOpen(); + } + /** + * Checks existence of a transaction on the database. + * + * @param transactionId Identifier of transaction. + * @return true if a transaction with that identifier exists, false otherwise. + */ + @Override + public boolean has(Identifier transactionId) { + boolean hasBoolean; + try { + lock.readLock().lock(); + hasBoolean = transactionsMap.containsKey(transactionId.getBytes()); + } finally { + lock.readLock().unlock(); + } + return hasBoolean; + } + + /** + * Adds transaction to the database. + * + * @param transaction given transaction to be added. 
+ * @return true if transaction did not exist on the database, false if transaction is already in + * database. + */ + @Override + public boolean add(Transaction transaction) { + boolean addBoolean; + try { + lock.writeLock().lock(); + addBoolean = transactionsMap.putIfAbsentBoolean(transaction.id().getBytes(),transaction); + } finally { + lock.writeLock().unlock(); + } + return addBoolean; + } + + /** + * Removes transaction with given identifier. + * + * @param transactionId identifier of the transaction. + * @return true if transaction exists on database and removed successfully, false if transaction does not exist on + * database. + */ + @Override + public boolean remove(Identifier transactionId) { + boolean removeBoolean; + try { + lock.writeLock().lock(); + Transaction transaction = get(transactionId); + removeBoolean =transactionsMap.remove(transactionId.getBytes(), transaction); + }finally { + lock.writeLock().unlock(); + } + return removeBoolean; + } + + /** + * Returns the transaction with given identifier. + * + * @param transactionId identifier of the transaction. + * @return the transaction itself if exists and null otherwise. + */ + @Override + public Transaction get(Identifier transactionId) { + + lock.readLock().lock(); + Transaction transaction = (Transaction) transactionsMap.get(transactionId.getBytes()); + lock.readLock().unlock(); + return transaction; + } + + /** + * Returns all transactions stored in database. + * + * @return all transactions stored tranin database. + */ + @Override + public ArrayList all() { + ArrayList allTransactions = new ArrayList<>(); + for (Object transaction : transactionsMap.values()){ + allTransactions.add((Transaction) transaction); + } + return allTransactions; + } + + /** + * It closes the database. 
+ */ + public void closeDb() { + db.close(); + } +} diff --git a/src/test/java/storage/TransactionsTest.java b/src/test/java/storage/TransactionsTest.java index 8b2257d5..43ce46a9 100644 --- a/src/test/java/storage/TransactionsTest.java +++ b/src/test/java/storage/TransactionsTest.java @@ -1,9 +1,33 @@ package storage; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import model.lightchain.Transaction; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.testcontainers.shaded.org.apache.commons.io.FileUtils; +import storage.mapdb.TransactionsMapDb; +import unittest.fixtures.TransactionFixture; + /** * Encapsulates tests for transactions database. */ public class TransactionsTest { + + private static final String TEMP_DIR = "tempdir"; + private static final String TEMP_FILE = "tempfile.db"; + private Path tempdir; + private ArrayList allTransactions; + private TransactionsMapDb db; // TODO: implement a unit test for each of the following scenarios: // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a // temporary directory. @@ -31,4 +55,505 @@ public class TransactionsTest { // able to retrieve the transaction. // 6. Repeat test case 5 for concurrently adding transactions as well as concurrently querying the // database for has, and get. + + /** + * Set the tests up. 
+ */ + @BeforeEach + void setUp() throws IOException { + Path currentRelativePath = Paths.get(""); + tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); + db = new TransactionsMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE); + allTransactions = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + allTransactions.add(TransactionFixture.newTransaction(10)); + } + } + + /** + * Adding transactions sequentially. + * + * @throws IOException throw IOException. + */ + @Test + void sequentialAddTest() throws IOException { + for (Transaction transaction : allTransactions) { + Assertions.assertTrue(db.add(transaction)); + } + for (Transaction transaction : allTransactions) { + Assertions.assertTrue(db.has(transaction.id())); + } + ArrayList all = db.all(); + Assertions.assertEquals(all.size(), 10); + for (Transaction transaction : all) { + Assertions.assertTrue(allTransactions.contains(transaction)); + } + for (Transaction transaction : allTransactions) { + Assertions.assertTrue(allTransactions.contains(db.get(transaction.id()))); + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + @Test + void concurrentAddTest() throws IOException { + int concurrencyDegree = 10; + + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(concurrencyDegree); + Thread[] addThreads = new Thread[concurrencyDegree]; + /* + Adding all transactions concurrently. + */ + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + addThreads[i] = new Thread(() -> { + if (!db.add(allTransactions.get(finalI))) { + threadError.getAndIncrement(); + } + addDone.countDown(); + }); + } + for (Thread t : addThreads) { + t.start(); + } + try { + boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has. 
+ */ + CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + hasThreads[i] = new Thread(() -> { + if (!db.has((allTransactions.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone.countDown(); + }); + } + + for (Thread t : hasThreads) { + t.start(); + } + try { + boolean doneOneTime = hasDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + + /* + Checking correctness of insertion by GET. + */ + CountDownLatch getDone = new CountDownLatch(concurrencyDegree); + Thread[] getThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + getThreads[i] = new Thread(() -> { + if (!allTransactions.contains(db.get(allTransactions.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone.countDown(); + }); + } + + for (Thread t : getThreads) { + t.start(); + } + try { + boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Retrieving all concurrently. 
+ */ + CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); + Thread[] allThreads = new Thread[concurrencyDegree]; + ArrayList all = db.all(); + for (int i = 0; i < all.size(); i++) { + int finalI = i; + allThreads[i] = new Thread(() -> { + if (!all.contains(allTransactions.get(finalI))) { + threadError.getAndIncrement(); + } + doneAll.countDown(); + }); + } + + for (Thread t : allThreads) { + t.start(); + } + try { + boolean doneOneTime = doneAll.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + + Assertions.assertEquals(0, threadError.get()); + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + /** + * Add 10 new transaction, remove first 5 and test methods. + */ + @Test + void removeFirstFiveTest() throws IOException { + for (Transaction transaction : allTransactions) { + Assertions.assertTrue(db.add(transaction)); + } + for (int i = 0; i < 5; i++) { + Assertions.assertTrue(db.remove(allTransactions.get(i).id())); + } + for (int i = 0; i < 10; i++) { + if (i < 5) { + Assertions.assertFalse(db.has(allTransactions.get(i).id()) || db.all().contains(allTransactions.get(i))); + } else { + Assertions.assertTrue(db.has(allTransactions.get(i).id()) && db.all().contains(allTransactions.get(i))); + } + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + /** + * Concurrent version of removeFirstFiveTest. + */ + @Test + void concurrentRemoveFirstFiveTest() throws IOException { + int concurrencyDegree = 10; + + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(concurrencyDegree); + Thread[] addThreads = new Thread[concurrencyDegree]; + /* + Adding all transactions concurrently. 
+ */ + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + addThreads[i] = new Thread(() -> { + if (!db.add(allTransactions.get(finalI))) { + threadError.getAndIncrement(); + } + addDone.countDown(); + }); + } + for (Thread t : addThreads) { + t.start(); + } + try { + boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Removing first 5 concurrently + */ + int removeTill = concurrencyDegree / 2; + CountDownLatch doneRemove = new CountDownLatch(removeTill); + Thread[] removeThreads = new Thread[removeTill]; + for (int i = 0; i < removeTill; i++) { + int finalI = i; + removeThreads[i] = new Thread(() -> { + if (!db.remove(allTransactions.get(finalI).id())) { + threadError.getAndIncrement(); + } + doneRemove.countDown(); + }); + } + + for (Thread t : removeThreads) { + t.start(); + } + try { + boolean doneOneTime = doneRemove.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + CountDownLatch doneHas = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + int finalI1 = i; + hasThreads[i] = new Thread(() -> { + if (allTransactions.indexOf(allTransactions.get(finalI)) < 5) { + if (db.has(allTransactions.get(finalI1).id())) { + threadError.getAndIncrement(); + } + } else { + if (!db.has(allTransactions.get(finalI).id())) { + threadError.getAndIncrement(); + } + } + doneHas.countDown(); + }); + } + + for (Thread t : hasThreads) { + t.start(); + } + try { + boolean doneOneTime = doneHas.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + CountDownLatch getDone = new CountDownLatch(concurrencyDegree / 2); + Thread[] getThreads = new Thread[concurrencyDegree / 2]; + for (int i = 
0; i < concurrencyDegree / 2; i++) { + int finalI = i; + int finalI1 = i + 5; + getThreads[i] = new Thread(() -> { + if (!allTransactions.contains(db.get(allTransactions.get(finalI).id())) + || allTransactions.contains(db.get(allTransactions.get(finalI1).id()))) { + threadError.getAndIncrement(); + } + getDone.countDown(); + }); + } + + for (Thread t : getThreads) { + t.start(); + } + try { + boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + /** + * Add 10 identifiers already exist and return false expected. + */ + @Test + void duplicationTest() throws IOException { + /* + Firt part of the test + */ + for (Transaction transaction : allTransactions) { + Assertions.assertTrue(db.add(transaction)); + } + for (Transaction transaction : allTransactions) { + Assertions.assertTrue(db.has(transaction.id())); + } + ArrayList all = db.all(); + Assertions.assertEquals(all.size(), 10); + for (Transaction transaction : all) { + Assertions.assertTrue(allTransactions.contains(transaction)); + } + for (Transaction transaction : allTransactions) { + Assertions.assertTrue(allTransactions.contains(db.get(transaction.id()))); + } + for (Transaction transaction : allTransactions) { + Assertions.assertFalse(db.add(transaction)); + } + /* + After trying duplication, check again. + */ + for (Transaction transaction : allTransactions) { + Assertions.assertTrue(db.has(transaction.id())); + } + for (Transaction transaction : allTransactions) { + Assertions.assertTrue(allTransactions.contains(db.get(transaction.id()))); + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + /** + * Concurrent version of duplicationTest. 
+ */ + @Test + void concurrentDuplicationTest() throws IOException { + int concurrencyDegree = 10; + + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(concurrencyDegree); + Thread[] addThreads = new Thread[concurrencyDegree]; + /* + Adding all transactions concurrently. + */ + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + addThreads[i] = new Thread(() -> { + if (!db.add(allTransactions.get(finalI))) { + threadError.getAndIncrement(); + } + addDone.countDown(); + }); + } + for (Thread t : addThreads) { + t.start(); + } + try { + boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has. + */ + CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + hasThreads[i] = new Thread(() -> { + if (!db.has((allTransactions.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone.countDown(); + }); + } + + for (Thread t : hasThreads) { + t.start(); + } + try { + boolean doneOneTime = hasDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + + /* + Checking correctness of insertion by Get. 
+ */ + CountDownLatch getDone = new CountDownLatch(concurrencyDegree); + Thread[] getThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + getThreads[i] = new Thread(() -> { + if (!allTransactions.contains(db.get(allTransactions.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone.countDown(); + }); + } + + for (Thread t : getThreads) { + t.start(); + } + try { + boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Retrieving all concurrently. + */ + CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); + Thread[] allThreads = new Thread[concurrencyDegree]; + ArrayList all = db.all(); + for (int i = 0; i < all.size(); i++) { + int finalI = i; + allThreads[i] = new Thread(() -> { + if (!all.contains(allTransactions.get(finalI))) { + threadError.getAndIncrement(); + } + doneAll.countDown(); + }); + } + + for (Thread t : allThreads) { + t.start(); + } + try { + boolean doneOneTime = doneAll.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Adding existing transactions. + */ + CountDownLatch addDuplicateDone = new CountDownLatch(concurrencyDegree); + Thread[] addDuplicateThreads = new Thread[concurrencyDegree]; + /* + Adding all transactions concurrently. + */ + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + addDuplicateThreads[i] = new Thread(() -> { + if (db.add(allTransactions.get(finalI))) { + threadError.getAndIncrement(); + } + addDuplicateDone.countDown(); + }); + } + for (Thread t : addDuplicateThreads) { + t.start(); + } + try { + boolean doneOneTime = addDuplicateDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has again. 
+ */ + CountDownLatch hasDone2 = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads2 = new Thread[concurrencyDegree]; + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + hasThreads2[i] = new Thread(() -> { + if (!db.has((allTransactions.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone2.countDown(); + }); + } + + for (Thread t : hasThreads2) { + t.start(); + } + try { + boolean doneOneTime = hasDone2.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + + /* + Checking correctness of insertion by Get again. + */ + CountDownLatch getDone2 = new CountDownLatch(concurrencyDegree); + Thread[] getThreads2 = new Thread[concurrencyDegree]; + for (int i = 0; i < allTransactions.size(); i++) { + int finalI = i; + getThreads2[i] = new Thread(() -> { + if (!allTransactions.contains(db.get(allTransactions.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone2.countDown(); + }); + } + + for (Thread t : getThreads2) { + t.start(); + } + try { + boolean doneOneTime = getDone2.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } } From edc8bf7eb8c8e530c1949fbc96e0a24bbf2669af Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 14 Apr 2022 17:58:11 +0300 Subject: [PATCH 03/31] BlocksMapDb is implemented --- src/main/java/model/lightchain/Block.java | 18 +++ src/main/java/storage/mapdb/BlocksMapDb.java | 125 ++++++++++++++++++ .../java/storage/mapdb/TransactionsMapDb.java | 9 +- 3 files changed, 148 insertions(+), 4 deletions(-) create mode 100644 src/main/java/storage/mapdb/BlocksMapDb.java diff --git a/src/main/java/model/lightchain/Block.java b/src/main/java/model/lightchain/Block.java index 0001db36..ac3228bb 100644 --- a/src/main/java/model/lightchain/Block.java +++ 
b/src/main/java/model/lightchain/Block.java @@ -72,6 +72,24 @@ public Block(Identifier previousBlockId, this.height = height; } + @Override + public int hashCode() { + return this.id().hashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Block)) { + return false; + } + Block that = (Block) o; + + return this.id().equals(that.id()); + } + /** * Type of this entity. * diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java new file mode 100644 index 00000000..1f506b9f --- /dev/null +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -0,0 +1,125 @@ +package storage.mapdb; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import model.lightchain.Block; +import model.lightchain.Identifier; +import org.mapdb.BTreeMap; +import org.mapdb.DB; +import org.mapdb.DBMaker; +import org.mapdb.Serializer; +import org.mapdb.serializer.SerializerArrayTuple; +import storage.Blocks; + + +/** + * Implementation of Transactions interface. + */ +public class BlocksMapDb implements Blocks { + private final DB db; + private final ReentrantReadWriteLock lock; + private static final String MAP_NAME = "blocks_map"; + private final BTreeMap blocksMap; + + public BlocksMapDb(String filePath) { + this.db = DBMaker.fileDB(filePath).make(); + this.lock = new ReentrantReadWriteLock(); + blocksMap = (BTreeMap) this.db.treeMap(MAP_NAME) + .keySerializer(new SerializerArrayTuple(Serializer.BYTE_ARRAY,Serializer.INTEGER)) + .createOrOpen(); + } + + /** + * Checks existence of block on the database. + * + * @param blockId Identifier of block. + * @return true if a block with that identifier exists, false otherwise. 
+ */ + @Override + public boolean has(Identifier blockId) { +for(Map.Entry entry : blocksMap.entrySet()){ + Object[] objects= entry.getKey(); + byte[] bytes = (byte[]) objects[0]; + if(Arrays.equals(bytes,blockId.getBytes())){ + return true; + } +}return false; + } + + /** + * Adds block to the database. + * + * @param block given block to be added. + * @return true if block did not exist on the database, false if block is already in + * database. + */ + @Override + public boolean add(Block block) { + + return blocksMap.putIfAbsentBoolean(new Object[]{block.id().getBytes(),block.getHeight()},block); + } + + /** + * Removes block with given identifier. + * + * @param blockId identifier of the block. + * @return true if block exists on database and removed successfully, false if block does not exist on + * database. + */ + @Override + public boolean remove(Identifier blockId) { + for(Object[] objects : blocksMap.keySet()){ + if(objects[0] == blockId.getBytes()){ + return blocksMap.remove(objects,blocksMap.get(objects)); + } + } + return false; + } + + /** + * Returns the block with given identifier. + * + * @param blockId identifier of the block. + * @return the block itself if exists and null otherwise. + */ + @Override + public Block byId(Identifier blockId) { + for(Object[] objects : blocksMap.keySet()){ + if(objects[0] == blockId.getBytes()){ + return blocksMap.get(objects); + } + } + return null; + } + + /** + * Returns the block with the given height. + * + * @param height height of the block. + * @return the block itself if exists and null otherwise. + */ + @Override + public Block atHeight(int height) { + for(Object[] objects : blocksMap.keySet()){ + if((Integer) objects[1] == height){ + return blocksMap.get(objects); + } + } + return null; + } + + /** + * Returns all blocks stored in database. + * + * @return all stored blocks in database. 
+ */ + @Override + public ArrayList all() { + ArrayList allBlocks =new ArrayList<>(); + allBlocks.addAll(blocksMap.getValues()); + return allBlocks; + } +} diff --git a/src/main/java/storage/mapdb/TransactionsMapDb.java b/src/main/java/storage/mapdb/TransactionsMapDb.java index 5e4edbf2..78135de1 100644 --- a/src/main/java/storage/mapdb/TransactionsMapDb.java +++ b/src/main/java/storage/mapdb/TransactionsMapDb.java @@ -29,6 +29,7 @@ public TransactionsMapDb(String filePath) { .keySerializer(Serializer.BYTE_ARRAY) .createOrOpen(); } + /** * Checks existence of a transaction on the database. * @@ -59,7 +60,7 @@ public boolean add(Transaction transaction) { boolean addBoolean; try { lock.writeLock().lock(); - addBoolean = transactionsMap.putIfAbsentBoolean(transaction.id().getBytes(),transaction); + addBoolean = transactionsMap.putIfAbsentBoolean(transaction.id().getBytes(), transaction); } finally { lock.writeLock().unlock(); } @@ -79,8 +80,8 @@ public boolean remove(Identifier transactionId) { try { lock.writeLock().lock(); Transaction transaction = get(transactionId); - removeBoolean =transactionsMap.remove(transactionId.getBytes(), transaction); - }finally { + removeBoolean = transactionsMap.remove(transactionId.getBytes(), transaction); + } finally { lock.writeLock().unlock(); } return removeBoolean; @@ -109,7 +110,7 @@ public Transaction get(Identifier transactionId) { @Override public ArrayList all() { ArrayList allTransactions = new ArrayList<>(); - for (Object transaction : transactionsMap.values()){ + for (Object transaction : transactionsMap.values()) { allTransactions.add((Transaction) transaction); } return allTransactions; From b289d6015f0a90b207ff01e2320d297d7cc02919 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Fri, 15 Apr 2022 08:49:16 +0300 Subject: [PATCH 04/31] BlocksMapDb is implemented but adding problem exist --- .../model/crypto/ecdsa/EcdsaSignature.java | 4 +- src/main/java/model/lightchain/Block.java | 4 +- 
src/main/java/storage/mapdb/BlocksMapDb.java | 37 ++++++++++---- src/test/java/storage/BlocksTest.java | 51 +++++++++++++++++++ 4 files changed, 85 insertions(+), 11 deletions(-) diff --git a/src/main/java/model/crypto/ecdsa/EcdsaSignature.java b/src/main/java/model/crypto/ecdsa/EcdsaSignature.java index 73b85fad..a1bc1b77 100644 --- a/src/main/java/model/crypto/ecdsa/EcdsaSignature.java +++ b/src/main/java/model/crypto/ecdsa/EcdsaSignature.java @@ -4,10 +4,12 @@ import model.crypto.Signature; import model.lightchain.Identifier; +import java.io.Serializable; + /** * ECDSA signature implementation with signer ID. */ -public class EcdsaSignature extends Signature { +public class EcdsaSignature extends Signature implements Serializable { public static final String ELLIPTIC_CURVE = "EC"; public static final String SIGN_ALG_SHA_3_256_WITH_ECDSA = "SHA3-256withECDSA"; diff --git a/src/main/java/model/lightchain/Block.java b/src/main/java/model/lightchain/Block.java index ac3228bb..ad9b8c24 100644 --- a/src/main/java/model/lightchain/Block.java +++ b/src/main/java/model/lightchain/Block.java @@ -3,10 +3,12 @@ import model.codec.EntityType; import model.crypto.Signature; +import java.io.Serializable; + /** * Represents a LightChain Block that encapsulates set of ValidatedTransaction(s). */ -public class Block extends model.Entity { +public class Block extends model.Entity implements Serializable { /** * Reference to the hash value of another block as its parent. 
*/ diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java index 1f506b9f..cf682b59 100644 --- a/src/main/java/storage/mapdb/BlocksMapDb.java +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -40,13 +40,12 @@ public BlocksMapDb(String filePath) { */ @Override public boolean has(Identifier blockId) { -for(Map.Entry entry : blocksMap.entrySet()){ - Object[] objects= entry.getKey(); - byte[] bytes = (byte[]) objects[0]; - if(Arrays.equals(bytes,blockId.getBytes())){ - return true; - } -}return false; + for(Object[] objects : blocksMap.keySet()){ + if(objects[0] == blockId.getBytes()){ + return true; + } + } + return false; } /** @@ -58,8 +57,23 @@ public boolean has(Identifier blockId) { */ @Override public boolean add(Block block) { + boolean addBool; + try { + lock.writeLock().lock(); + System.out.println("Block id BEFORE put :"+block.id()); + System.out.println("Block previousId BEFORE put :"+block.getPreviousBlockId()); + System.out.println("Block height BEFORE put :"+block.getHeight()); + Object[] objects = new Object[]{block.id().getBytes(),block.getHeight()}; + addBool= blocksMap.putIfAbsentBoolean(objects,block); + System.out.println("Block id AFTER put :"+blocksMap.get(objects).id()); + System.out.println("Block previousID AFTER put :"+blocksMap.get(objects).getPreviousBlockId()); + System.out.println("Block height AFTER put :"+blocksMap.get(objects).getHeight()); + System.out.println(); + } finally { + lock.writeLock().unlock(); + } + return addBool; - return blocksMap.putIfAbsentBoolean(new Object[]{block.id().getBytes(),block.getHeight()},block); } /** @@ -119,7 +133,12 @@ public Block atHeight(int height) { @Override public ArrayList all() { ArrayList allBlocks =new ArrayList<>(); - allBlocks.addAll(blocksMap.getValues()); + for(Block block : blocksMap.getValues()){ + allBlocks.add(block); + } return allBlocks; } + public void closeDb() { + db.close(); + } } diff --git 
a/src/test/java/storage/BlocksTest.java b/src/test/java/storage/BlocksTest.java index afcefb39..f40be8a3 100644 --- a/src/test/java/storage/BlocksTest.java +++ b/src/test/java/storage/BlocksTest.java @@ -1,9 +1,30 @@ package storage; +import model.lightchain.Block; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.testcontainers.shaded.org.apache.commons.io.FileUtils; +import storage.mapdb.BlocksMapDb; +import unittest.fixtures.BlockFixture; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; + /** * Encapsulates tests for block database. */ public class BlocksTest { + + private static final String TEMP_DIR = "tempdir"; + private static final String TEMP_FILE = "tempfile.db"; + private Path tempdir; + private ArrayList allBlocks; + private BlocksMapDb db; // TODO: implement a unit test for each of the following scenarios: // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a // temporary directory. @@ -33,4 +54,34 @@ public class BlocksTest { // able to retrieve the block. // 6. Repeat test case 5 for concurrently adding blocks as well as concurrently querying the // database for has, byId, and byHeight. + + /** + * Set the tests up. + */ + @BeforeEach + void setUp() throws IOException { + Path currentRelativePath = Paths.get(""); + tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); + db = new BlocksMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE); + allBlocks = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + allBlocks.add(BlockFixture.newBlock()); + } + } + + /** + * Adding blocks sequentially. + * + * @throws IOException throw IOException. 
+ */ + @Test + void sequentialAddTest() throws IOException { + for (Block block : allBlocks){ + db.add(block); + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + } From fe69f97bd718462a87132ea54d443537a8c4df26 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Fri, 15 Apr 2022 10:08:56 +0300 Subject: [PATCH 05/31] Distributed is implemented --- src/main/java/model/codec/EncodedEntity.java | 33 ++++- src/main/java/storage/Distributed.java | 2 +- src/main/java/storage/mapdb/Distributed.java | 127 +++++++++++++++++++ 3 files changed, 160 insertions(+), 2 deletions(-) create mode 100644 src/main/java/storage/mapdb/Distributed.java diff --git a/src/main/java/model/codec/EncodedEntity.java b/src/main/java/model/codec/EncodedEntity.java index 73e1afcb..1b96a92b 100644 --- a/src/main/java/model/codec/EncodedEntity.java +++ b/src/main/java/model/codec/EncodedEntity.java @@ -1,12 +1,43 @@ package model.codec; +import java.io.Serializable; +import java.util.Arrays; + /** * Represents an encapsulation around the byte representation of an entity accompanied by its original type. */ -public class EncodedEntity { +public class EncodedEntity implements Serializable { private final byte[] bytes; private final String type; + /** + * Hashcode of entity. + * + * @return hashcode of encodedentity. + */ + @Override + public int hashCode() { + return Arrays.hashCode(this.bytes); + } + + /** + * Check if objects are equal + * + * @param o encodedentity. + * @return true if equals. 
+ */ + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (!(o instanceof EncodedEntity)) { + return false; + } + EncodedEntity that = (EncodedEntity) o; + return Arrays.equals(this.bytes, that.bytes); + } + // EncodedEntity(id.getBytes() || byte(i), "assignment") public EncodedEntity(byte[] bytes, String type) { this.bytes = bytes.clone(); diff --git a/src/main/java/storage/Distributed.java b/src/main/java/storage/Distributed.java index 936a68d5..c1143e6e 100644 --- a/src/main/java/storage/Distributed.java +++ b/src/main/java/storage/Distributed.java @@ -43,7 +43,7 @@ public interface Distributed { * @param e identifier of the entity. * @return the entity itself if exists and null otherwise. */ - Block get(Identifier e); + Entity get(Identifier e); /** * Returns all entities stored in database. diff --git a/src/main/java/storage/mapdb/Distributed.java b/src/main/java/storage/mapdb/Distributed.java new file mode 100644 index 00000000..90915fca --- /dev/null +++ b/src/main/java/storage/mapdb/Distributed.java @@ -0,0 +1,127 @@ +package storage.mapdb; + +import java.util.ArrayList; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import model.Entity; +import model.codec.EncodedEntity; +import model.lightchain.Identifier; +import modules.codec.JsonEncoder; +import org.mapdb.DB; +import org.mapdb.DBMaker; +import org.mapdb.HTreeMap; + +public class Distributed implements storage.Distributed { + private final JsonEncoder encoder = new JsonEncoder(); + private final DB db; + private final ReentrantReadWriteLock lock; + private static final String MAP_NAME = "distributed_map"; + private final HTreeMap distributedMap; + + /** + * Creates DistributedMapDb + */ + public Distributed(String filePath) { + this.db = DBMaker.fileDB(filePath).make(); + this.lock = new ReentrantReadWriteLock(); + distributedMap = this.db.hashMap(MAP_NAME) + .createOrOpen(); + } + + /** + * Checks existence of entity on the database. 
+ * + * @param entityId Identifier of entity. + * @return true if a entity with that identifier exists, false otherwise. + */ + @Override + public boolean has(Identifier entityId) { + boolean hasBoolean; + try { + lock.readLock().lock(); + hasBoolean = distributedMap.containsKey(entityId.getBytes()); + } finally { + lock.readLock().unlock(); + } + return hasBoolean; + } + + /** + * Adds entity to the database. + * + * @param e given entity to be added. + * @return true if entity did not exist on the database, false if entity is already in + * database. + */ + @Override + public boolean add(Entity e) { + boolean addBoolean; + try { + lock.writeLock().lock(); + addBoolean = distributedMap.putIfAbsentBoolean(e.id().getBytes(), encoder.encode(e)); + } finally { + lock.writeLock().unlock(); + } + return addBoolean; + } + + /** + * Removes entity with given identifier. + * + * @param e identifier of the entity. + * @return true if entity exists on database and removed successfully, false if entity does not exist on + * database. + */ + @Override + public boolean remove(Entity e) { + boolean removeBoolean; + try { + lock.writeLock().lock(); + removeBoolean = distributedMap.remove(e.id().getBytes(), encoder.encode(e)); + } finally { + lock.writeLock().unlock(); + } + return removeBoolean; + } + + /** + * Returns the entity with given identifier. + * + * @param entityId identifier of the entity. + * @return the entity itself if exists and null otherwise. 
+ */ + @Override + public Entity get(Identifier entityId) { + Entity decodedEntity = null; + + try { + lock.readLock().lock(); + EncodedEntity encodedEntity = (EncodedEntity) distributedMap.get(entityId.getBytes()); + assert encodedEntity != null; + decodedEntity = encoder.decode(encodedEntity); + } catch (ClassNotFoundException e) { + //throw new ClassNotFoundException("could not found the class"+e); + } finally { + lock.readLock().unlock(); + } + return decodedEntity; + } + + /** + * Returns all entities stored in database. + * + * @return all stored entities in database. + */ + @Override + public ArrayList all() { + ArrayList allEntities = new ArrayList<>(); + for (Object encodedEntity : distributedMap.values()) { + try { + allEntities.add(encoder.decode((EncodedEntity) encodedEntity)); + } catch (ClassNotFoundException e) { + //throw new ClassNotFoundException("could not found the class"+e); + } + } + return allEntities; + } +} From 7b0b8a313f36d0925b7714db7392e2cef30fe996 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Fri, 15 Apr 2022 12:44:54 +0300 Subject: [PATCH 06/31] Proof of id remains same for transaction --- src/main/java/storage/mapdb/TransactionsMapDb.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main/java/storage/mapdb/TransactionsMapDb.java b/src/main/java/storage/mapdb/TransactionsMapDb.java index 78135de1..c10e8512 100644 --- a/src/main/java/storage/mapdb/TransactionsMapDb.java +++ b/src/main/java/storage/mapdb/TransactionsMapDb.java @@ -60,7 +60,9 @@ public boolean add(Transaction transaction) { boolean addBoolean; try { lock.writeLock().lock(); + //System.out.println(transaction.id()); test if id remains same. addBoolean = transactionsMap.putIfAbsentBoolean(transaction.id().getBytes(), transaction); + //System.out.println(get(transaction.id()).id());test if id remains same. 
} finally { lock.writeLock().unlock(); } From aa211285ed3c7ff656a4365caf8acc58ddcc0a10 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Fri, 15 Apr 2022 16:57:55 +0300 Subject: [PATCH 07/31] Change structure --- src/main/java/storage/mapdb/BlocksMapDb.java | 35 +++++++++++--------- src/test/java/storage/BlocksTest.java | 8 ++++- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java index cf682b59..8f0c5990 100644 --- a/src/main/java/storage/mapdb/BlocksMapDb.java +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -3,6 +3,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Map; +import java.util.NavigableMap; import java.util.concurrent.locks.ReentrantReadWriteLock; import model.lightchain.Block; @@ -23,6 +24,7 @@ public class BlocksMapDb implements Blocks { private final ReentrantReadWriteLock lock; private static final String MAP_NAME = "blocks_map"; private final BTreeMap blocksMap; + private final byte[] idBytes; public BlocksMapDb(String filePath) { this.db = DBMaker.fileDB(filePath).make(); @@ -30,6 +32,7 @@ public BlocksMapDb(String filePath) { blocksMap = (BTreeMap) this.db.treeMap(MAP_NAME) .keySerializer(new SerializerArrayTuple(Serializer.BYTE_ARRAY,Serializer.INTEGER)) .createOrOpen(); + this.idBytes=null; } /** @@ -40,10 +43,10 @@ public BlocksMapDb(String filePath) { */ @Override public boolean has(Identifier blockId) { - for(Object[] objects : blocksMap.keySet()){ - if(objects[0] == blockId.getBytes()){ - return true; - } + NavigableMap blockNavigableMap =blocksMap.prefixSubMap(new Object[]{blockId.getBytes()}); + System.out.println(blockNavigableMap.values()); + if(!blockNavigableMap.isEmpty()){ + return true; } return false; } @@ -57,22 +60,24 @@ public boolean has(Identifier blockId) { */ @Override public boolean add(Block block) { - boolean addBool; + Boolean addBool; try { lock.writeLock().lock(); - 
System.out.println("Block id BEFORE put :"+block.id()); + /* System.out.println("Block id BEFORE put :"+block.id().getBytes()); System.out.println("Block previousId BEFORE put :"+block.getPreviousBlockId()); - System.out.println("Block height BEFORE put :"+block.getHeight()); + System.out.println("Block height BEFORE put :"+block.getHeight());*/ Object[] objects = new Object[]{block.id().getBytes(),block.getHeight()}; addBool= blocksMap.putIfAbsentBoolean(objects,block); + + /* System.out.println(blocksMap.getValues()); System.out.println("Block id AFTER put :"+blocksMap.get(objects).id()); System.out.println("Block previousID AFTER put :"+blocksMap.get(objects).getPreviousBlockId()); System.out.println("Block height AFTER put :"+blocksMap.get(objects).getHeight()); - System.out.println(); + System.out.println();*/ } finally { lock.writeLock().unlock(); } - return addBool; + return !addBool; } @@ -101,12 +106,8 @@ public boolean remove(Identifier blockId) { */ @Override public Block byId(Identifier blockId) { - for(Object[] objects : blocksMap.keySet()){ - if(objects[0] == blockId.getBytes()){ - return blocksMap.get(objects); - } - } - return null; + NavigableMap blockNavigableMap =blocksMap.prefixSubMap(new Object[]{blockId.getBytes()}); + return blockNavigableMap.firstEntry().getValue(); } /** @@ -117,11 +118,13 @@ public Block byId(Identifier blockId) { */ @Override public Block atHeight(int height) { + /*for(byte[] bytes :) for(Object[] objects : blocksMap.keySet()){ if((Integer) objects[1] == height){ return blocksMap.get(objects); } - } + }*/ + return null; } diff --git a/src/test/java/storage/BlocksTest.java b/src/test/java/storage/BlocksTest.java index f40be8a3..60517abd 100644 --- a/src/test/java/storage/BlocksTest.java +++ b/src/test/java/storage/BlocksTest.java @@ -77,7 +77,13 @@ void setUp() throws IOException { @Test void sequentialAddTest() throws IOException { for (Block block : allBlocks){ - db.add(block); + Assertions.assertTrue(db.add(block)); + } 
+ for (Block block : allBlocks){ + Assertions.assertFalse(db.add(block)); + } + for (Block block : allBlocks){ + Assertions.assertTrue(db.has(block.id())); } db.closeDb(); FileUtils.deleteDirectory(new File(tempdir.toString())); From 3c8c0984991699bbc7e95c8157a614bea53129a4 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Fri, 15 Apr 2022 17:01:43 +0300 Subject: [PATCH 08/31] Change structure --- src/main/java/storage/mapdb/BlocksMapDb.java | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java index 8f0c5990..d3132d8d 100644 --- a/src/main/java/storage/mapdb/BlocksMapDb.java +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -44,7 +44,6 @@ public BlocksMapDb(String filePath) { @Override public boolean has(Identifier blockId) { NavigableMap blockNavigableMap =blocksMap.prefixSubMap(new Object[]{blockId.getBytes()}); - System.out.println(blockNavigableMap.values()); if(!blockNavigableMap.isEmpty()){ return true; } @@ -63,17 +62,17 @@ public boolean add(Block block) { Boolean addBool; try { lock.writeLock().lock(); - /* System.out.println("Block id BEFORE put :"+block.id().getBytes()); + System.out.println("Block id BEFORE put :"+block.id()); System.out.println("Block previousId BEFORE put :"+block.getPreviousBlockId()); - System.out.println("Block height BEFORE put :"+block.getHeight());*/ + System.out.println("Block height BEFORE put :"+block.getHeight()); Object[] objects = new Object[]{block.id().getBytes(),block.getHeight()}; addBool= blocksMap.putIfAbsentBoolean(objects,block); - /* System.out.println(blocksMap.getValues()); + System.out.println("Block id AFTER put :"+blocksMap.get(objects).id()); System.out.println("Block previousID AFTER put :"+blocksMap.get(objects).getPreviousBlockId()); System.out.println("Block height AFTER put :"+blocksMap.get(objects).getHeight()); - System.out.println();*/ + System.out.println(); } finally { 
lock.writeLock().unlock(); } From 97a11d52594a88e48c9bd8efab85dfc32e9938bd Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 21 Apr 2022 21:24:16 +0300 Subject: [PATCH 09/31] Transactions and Blocks mapdb done. --- src/main/java/model/crypto/Signature.java | 4 +- src/main/java/model/lightchain/Block.java | 4 +- .../lightchain/ValidatedTransaction.java | 4 +- src/main/java/storage/mapdb/BlocksMapDb.java | 120 +-- src/test/java/storage/BlocksTest.java | 729 +++++++++++++++++- src/test/java/storage/TransactionsTest.java | 10 +- 6 files changed, 804 insertions(+), 67 deletions(-) diff --git a/src/main/java/model/crypto/Signature.java b/src/main/java/model/crypto/Signature.java index 6b0c50bf..fa9bc1c7 100644 --- a/src/main/java/model/crypto/Signature.java +++ b/src/main/java/model/crypto/Signature.java @@ -3,10 +3,12 @@ import model.Entity; import model.lightchain.Identifier; +import java.io.Serializable; + /** * Represents abstract data type for the cryptographic digital signature used in LightChain. */ -public abstract class Signature extends Entity { +public abstract class Signature extends Entity implements Serializable { /** * The signature value in bytes. */ diff --git a/src/main/java/model/lightchain/Block.java b/src/main/java/model/lightchain/Block.java index ad9b8c24..50b9c549 100644 --- a/src/main/java/model/lightchain/Block.java +++ b/src/main/java/model/lightchain/Block.java @@ -1,10 +1,10 @@ package model.lightchain; +import java.io.Serializable; + import model.codec.EntityType; import model.crypto.Signature; -import java.io.Serializable; - /** * Represents a LightChain Block that encapsulates set of ValidatedTransaction(s). 
*/ diff --git a/src/main/java/model/lightchain/ValidatedTransaction.java b/src/main/java/model/lightchain/ValidatedTransaction.java index e60c932c..233a6faa 100644 --- a/src/main/java/model/lightchain/ValidatedTransaction.java +++ b/src/main/java/model/lightchain/ValidatedTransaction.java @@ -3,11 +3,13 @@ import model.codec.EntityType; import model.crypto.Signature; +import java.io.Serializable; + /** * A ValidatedTransaction is a wrapper around a Transaction that carries a proof of assigned validators that attests * the transaction passed local validation of validators. */ -public class ValidatedTransaction extends Transaction { +public class ValidatedTransaction extends Transaction implements Serializable { /** * Represents the signatures of assigned validators to this transaction. */ diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java index d3132d8d..c9a57c50 100644 --- a/src/main/java/storage/mapdb/BlocksMapDb.java +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -1,38 +1,36 @@ package storage.mapdb; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Map; -import java.util.NavigableMap; import java.util.concurrent.locks.ReentrantReadWriteLock; import model.lightchain.Block; import model.lightchain.Identifier; -import org.mapdb.BTreeMap; -import org.mapdb.DB; -import org.mapdb.DBMaker; -import org.mapdb.Serializer; -import org.mapdb.serializer.SerializerArrayTuple; +import org.mapdb.*; import storage.Blocks; - /** * Implementation of Transactions interface. 
*/ public class BlocksMapDb implements Blocks { - private final DB db; + private final DB dbID; + private final DB dbHeight; private final ReentrantReadWriteLock lock; - private static final String MAP_NAME = "blocks_map"; - private final BTreeMap blocksMap; - private final byte[] idBytes; + private static final String MAP_NAME_ID = "blocks_map_id"; + private static final String MAP_NAME_HEIGHT = "blocks_map_height"; + private final HTreeMap blocksIdMap; + private final HTreeMap >blocksHeightMap; + - public BlocksMapDb(String filePath) { - this.db = DBMaker.fileDB(filePath).make(); + public BlocksMapDb(String filePathId,String filePathHeight) { + this.dbID = DBMaker.fileDB(filePathId).make(); this.lock = new ReentrantReadWriteLock(); - blocksMap = (BTreeMap) this.db.treeMap(MAP_NAME) - .keySerializer(new SerializerArrayTuple(Serializer.BYTE_ARRAY,Serializer.INTEGER)) + blocksIdMap = this.dbID.hashMap(MAP_NAME_ID) + .keySerializer(Serializer.BYTE_ARRAY) + .createOrOpen(); + this.dbHeight = DBMaker.fileDB(filePathHeight).make(); + blocksHeightMap = (HTreeMap>) this.dbHeight.hashMap(MAP_NAME_HEIGHT) .createOrOpen(); - this.idBytes=null; + } /** @@ -43,11 +41,14 @@ public BlocksMapDb(String filePath) { */ @Override public boolean has(Identifier blockId) { - NavigableMap blockNavigableMap =blocksMap.prefixSubMap(new Object[]{blockId.getBytes()}); - if(!blockNavigableMap.isEmpty()){ - return true; + boolean hasBoolean; + try { + lock.readLock().lock(); + hasBoolean = blocksIdMap.containsKey(blockId.getBytes()); + } finally { + lock.readLock().unlock(); } - return false; + return hasBoolean; } /** @@ -59,24 +60,27 @@ public boolean has(Identifier blockId) { */ @Override public boolean add(Block block) { - Boolean addBool; + boolean addBooleanId; + Integer integer = block.getHeight(); try { lock.writeLock().lock(); - System.out.println("Block id BEFORE put :"+block.id()); - System.out.println("Block previousId BEFORE put :"+block.getPreviousBlockId()); - 
System.out.println("Block height BEFORE put :"+block.getHeight()); - Object[] objects = new Object[]{block.id().getBytes(),block.getHeight()}; - addBool= blocksMap.putIfAbsentBoolean(objects,block); - - - System.out.println("Block id AFTER put :"+blocksMap.get(objects).id()); - System.out.println("Block previousID AFTER put :"+blocksMap.get(objects).getPreviousBlockId()); - System.out.println("Block height AFTER put :"+blocksMap.get(objects).getHeight()); - System.out.println(); + addBooleanId = blocksIdMap.putIfAbsentBoolean(block.id().getBytes(), block); + if (addBooleanId){ + blocksHeightMap.compute(integer,(key,value)->{ + final ArrayList newBlockArray; + if(value == null){ + newBlockArray = new ArrayList<>(); + } else { + newBlockArray = new ArrayList<>(value); + } + newBlockArray.add(block.id()); + return newBlockArray; + }); + } } finally { lock.writeLock().unlock(); } - return !addBool; + return addBooleanId; } @@ -89,24 +93,33 @@ public boolean add(Block block) { */ @Override public boolean remove(Identifier blockId) { - for(Object[] objects : blocksMap.keySet()){ - if(objects[0] == blockId.getBytes()){ - return blocksMap.remove(objects,blocksMap.get(objects)); + boolean removeBoolean; + try { + lock.writeLock().lock(); + Block block = byId(blockId); + removeBoolean = blocksIdMap.remove(blockId.getBytes(), block); + if(removeBoolean){ + blocksHeightMap.get(block.getHeight()).remove(blockId); } + } finally { + lock.writeLock().unlock(); } - return false; + return removeBoolean; } /** * Returns the block with given identifier. - * + * t * @param blockId identifier of the block. * @return the block itself if exists and null otherwise. 
*/ @Override public Block byId(Identifier blockId) { - NavigableMap blockNavigableMap =blocksMap.prefixSubMap(new Object[]{blockId.getBytes()}); - return blockNavigableMap.firstEntry().getValue(); + lock.readLock().lock(); + Block block = (Block) blocksIdMap.get(blockId.getBytes()); + + lock.readLock().unlock(); + return block; } /** @@ -117,14 +130,11 @@ public Block byId(Identifier blockId) { */ @Override public Block atHeight(int height) { - /*for(byte[] bytes :) - for(Object[] objects : blocksMap.keySet()){ - if((Integer) objects[1] == height){ - return blocksMap.get(objects); - } - }*/ - - return null; + lock.readLock().lock(); + Identifier identifier = blocksHeightMap.get(height).get(0); + Block block = byId(identifier); + lock.readLock().unlock(); + return block; } /** @@ -134,13 +144,15 @@ public Block atHeight(int height) { */ @Override public ArrayList all() { - ArrayList allBlocks =new ArrayList<>(); - for(Block block : blocksMap.getValues()){ - allBlocks.add(block); + ArrayList allBlocks = new ArrayList<>(); + for (Object block : blocksIdMap.values()) { + allBlocks.add((Block) block); } return allBlocks; } public void closeDb() { - db.close(); + dbID.close(); + dbHeight.close(); } + } diff --git a/src/test/java/storage/BlocksTest.java b/src/test/java/storage/BlocksTest.java index 60517abd..89dd6420 100644 --- a/src/test/java/storage/BlocksTest.java +++ b/src/test/java/storage/BlocksTest.java @@ -14,6 +14,9 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; /** * Encapsulates tests for block database. 
@@ -21,7 +24,8 @@ public class BlocksTest { private static final String TEMP_DIR = "tempdir"; - private static final String TEMP_FILE = "tempfile.db"; + private static final String TEMP_FILE_ID = "tempfileID.db"; + private static final String TEMP_FILE_HEIGHT = "tempfileHEIGHT.db"; private Path tempdir; private ArrayList allBlocks; private BlocksMapDb db; @@ -62,7 +66,7 @@ public class BlocksTest { void setUp() throws IOException { Path currentRelativePath = Paths.get(""); tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); - db = new BlocksMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE); + db = new BlocksMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID, tempdir.toAbsolutePath() + "/" + TEMP_FILE_HEIGHT); allBlocks = new ArrayList<>(); for (int i = 0; i < 10; i++) { allBlocks.add(BlockFixture.newBlock()); @@ -76,18 +80,731 @@ void setUp() throws IOException { */ @Test void sequentialAddTest() throws IOException { - for (Block block : allBlocks){ + for (Block block : allBlocks) { Assertions.assertTrue(db.add(block)); } - for (Block block : allBlocks){ + for (Block block : allBlocks) { + Assertions.assertTrue(db.has(block.id())); + } + for (Block block : allBlocks) { + Assertions.assertTrue(allBlocks.contains(db.atHeight(block.getHeight()))); + } + for (Block block : allBlocks) { + Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); + } + ArrayList all = db.all(); + Assertions.assertEquals(all.size(), 10); + for (Block block : all) { + Assertions.assertTrue(allBlocks.contains(block)); + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + /** + * Adding blocks concurrently. + */ + @Test + void concurrentAddTest() throws IOException { + int concurrencyDegree = 10; + + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(concurrencyDegree); + Thread[] addThreads = new Thread[concurrencyDegree]; + /* + Adding all blocks concurrently. 
+ */ + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + addThreads[i] = new Thread(() -> { + if (!db.add(allBlocks.get(finalI))) { + threadError.getAndIncrement(); + } + addDone.countDown(); + }); + } + for (Thread t : addThreads) { + t.start(); + } + try { + boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has. + */ + CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + hasThreads[i] = new Thread(() -> { + if (!db.has((allBlocks.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone.countDown(); + }); + } + + for (Thread t : hasThreads) { + t.start(); + } + try { + boolean doneOneTime = hasDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion byID. + */ + CountDownLatch getDone = new CountDownLatch(concurrencyDegree); + Thread[] getThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + getThreads[i] = new Thread(() -> { + if (!allBlocks.contains(db.byId(allBlocks.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone.countDown(); + }); + } + + for (Thread t : getThreads) { + t.start(); + } + try { + boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by atHeight. 
+ */ + CountDownLatch heightDone = new CountDownLatch(concurrencyDegree); + Thread[] heightThreats = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + heightThreats[i] = new Thread(() -> { + if (!allBlocks.contains(db.atHeight(allBlocks.get(finalI).getHeight()))) { + threadError.getAndIncrement(); + } + heightDone.countDown(); + }); + } + + for (Thread t : heightThreats) { + t.start(); + } + try { + boolean doneOneTime = heightDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Retrieving all concurrently. + */ + CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); + Thread[] allThreads = new Thread[concurrencyDegree]; + ArrayList all = db.all(); + for (int i = 0; i < all.size(); i++) { + int finalI = i; + allThreads[i] = new Thread(() -> { + if (!all.contains(allBlocks.get(finalI))) { + threadError.getAndIncrement(); + } + doneAll.countDown(); + }); + } + + for (Thread t : allThreads) { + t.start(); + } + try { + boolean doneOneTime = doneAll.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + + Assertions.assertEquals(0, threadError.get()); + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + /** + * Add 10 new blocks, remove first 5 and test methods. 
+ */ + @Test + void removeFirstFiveTest() throws IOException { + for (Block block : allBlocks) { + Assertions.assertTrue(db.add(block)); + } + for (Block block : allBlocks) { + Assertions.assertTrue(db.has(block.id())); + } + for (Block block : allBlocks) { + Assertions.assertTrue(allBlocks.contains(db.atHeight(block.getHeight()))); + } + for (Block block : allBlocks) { + Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); + } + ArrayList all = db.all(); + Assertions.assertEquals(all.size(), 10); + for (Block block : all) { + Assertions.assertTrue(allBlocks.contains(block)); + } + for (int i = 0; i < 5; i++) { + Assertions.assertTrue(db.remove(allBlocks.get(i).id())); + } + for (int i = 0; i < 10; i++) { + if (i < 5) { + Assertions.assertFalse(db.has(allBlocks.get(i).id()) || db.all().contains(allBlocks.get(i))); + } else { + Assertions.assertTrue(db.has(allBlocks.get(i).id()) && db.all().contains(allBlocks.get(i)) + && db.all().contains(db.atHeight(allBlocks.get(i).getHeight())) && db.all().contains(db.byId(allBlocks.get(i).id()))); + } + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + /** + * Concurrent version of removeFirstFiveTest. + */ + @Test + void concurrentRemoveFirstFiveTest() throws IOException { + int concurrencyDegree = 10; + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(concurrencyDegree); + Thread[] addThreads = new Thread[concurrencyDegree]; + /* + Adding all blocks concurrently. 
+ */ + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + addThreads[i] = new Thread(() -> { + if (!db.add(allBlocks.get(finalI))) { + threadError.getAndIncrement(); + } + addDone.countDown(); + }); + } + for (Thread t : addThreads) { + t.start(); + } + try { + boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has. + */ + CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + hasThreads[i] = new Thread(() -> { + if (!db.has((allBlocks.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone.countDown(); + }); + } + for (Thread t : hasThreads) { + t.start(); + } + try { + boolean doneOneTime = hasDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion byID. + */ + CountDownLatch getDone = new CountDownLatch(concurrencyDegree); + Thread[] getThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + getThreads[i] = new Thread(() -> { + if (!allBlocks.contains(db.byId(allBlocks.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone.countDown(); + }); + } + + for (Thread t : getThreads) { + t.start(); + } + try { + boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by atHeight. 
+ */ + CountDownLatch heightDone = new CountDownLatch(concurrencyDegree); + Thread[] heightThreats = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + heightThreats[i] = new Thread(() -> { + if (!allBlocks.contains(db.atHeight(allBlocks.get(finalI).getHeight()))) { + threadError.getAndIncrement(); + } + heightDone.countDown(); + }); + } + + for (Thread t : heightThreats) { + t.start(); + } + try { + boolean doneOneTime = heightDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Retrieving all concurrently. + */ + CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); + Thread[] allThreads = new Thread[concurrencyDegree]; + ArrayList all = db.all(); + for (int i = 0; i < all.size(); i++) { + int finalI = i; + allThreads[i] = new Thread(() -> { + if (!all.contains(allBlocks.get(finalI))) { + threadError.getAndIncrement(); + } + doneAll.countDown(); + }); + } + for (Thread t : allThreads) { + t.start(); + } + try { + boolean doneOneTime = doneAll.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Removing first 5 concurrently + */ + int removeTill = concurrencyDegree / 2; + CountDownLatch doneRemove = new CountDownLatch(removeTill); + Thread[] removeThreads = new Thread[removeTill]; + for (int i = 0; i < removeTill; i++) { + int finalI = i; + removeThreads[i] = new Thread(() -> { + if (!db.remove(allBlocks.get(finalI).id())) { + threadError.getAndIncrement(); + } + doneRemove.countDown(); + }); + } + + for (Thread t : removeThreads) { + t.start(); + } + try { + boolean doneOneTime = doneRemove.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Check Has after removing first five blocks + */ + CountDownLatch doneHas = new CountDownLatch(concurrencyDegree / 
2); + Thread[] hasThreads2 = new Thread[concurrencyDegree / 2]; + for (int i = 0; i < concurrencyDegree / 2; i++) { + int finalI = i; + hasThreads2[i] = new Thread(() -> { + if (allBlocks.indexOf(allBlocks.get(finalI)) < 5) { + if (db.has(allBlocks.get(finalI).id())) { + threadError.getAndIncrement(); + } + } else { + if (!db.has(allBlocks.get(finalI).id())) { + threadError.getAndIncrement(); + } + } + doneHas.countDown(); + }); + } + for (Thread t : hasThreads2) { + t.start(); + } + try { + boolean doneOneTime = doneHas.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Check byID after removing first five blocks + */ + CountDownLatch getById = new CountDownLatch(concurrencyDegree / 2); + Thread[] getThreadsById = new Thread[concurrencyDegree / 2]; + for (int i = 0; i < concurrencyDegree / 2; i++) { + int finalI = i; + int finalI1 = i + 5; + getThreadsById[i] = new Thread(() -> { + if (allBlocks.contains(db.byId(allBlocks.get(finalI).id())) + || !allBlocks.contains(db.byId(allBlocks.get(finalI1).id()))) { + System.out.println("here"); + threadError.getAndIncrement(); + } + getById.countDown(); + }); + } + + for (Thread t : getThreadsById) { + t.start(); + } + try { + boolean doneOneTime = getById.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Check atHeight after removing first five blocks + */ + CountDownLatch getByHeight = new CountDownLatch(concurrencyDegree / 2); + Thread[] getThreadsByHeight = new Thread[concurrencyDegree / 2]; + for (int i = 0; i < concurrencyDegree / 2; i++) { + int finalI = i; + int finalI1 = i + 5; + getThreadsByHeight[i] = new Thread(() -> { + if (allBlocks.contains(db.atHeight(allBlocks.get(finalI).getHeight())) + || !allBlocks.contains(db.atHeight(allBlocks.get(finalI1).getHeight()))) { + + threadError.getAndIncrement(); + } + getByHeight.countDown(); + }); + } + 
for (Thread t : getThreadsByHeight) { + t.start(); + } + try { + boolean doneOneTime = getByHeight.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + Assertions.assertEquals(0, threadError.get()); + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + /** + * Add 10 blocks already exist and return false expected. + */ + @Test + void duplicationTest() throws IOException { + for (Block block : allBlocks) { + Assertions.assertTrue(db.add(block)); + } + for (Block block : allBlocks) { + Assertions.assertTrue(db.has(block.id())); + } + + for (Block block : allBlocks) { + Assertions.assertTrue(allBlocks.contains(db.atHeight(block.getHeight()))); + } + for (Block block : allBlocks) { + Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); + } + ArrayList all = db.all(); + Assertions.assertEquals(all.size(), 10); + for (Block block : all) { + Assertions.assertTrue(allBlocks.contains(block)); + } + for (Block block : allBlocks) { Assertions.assertFalse(db.add(block)); } - for (Block block : allBlocks){ - Assertions.assertTrue(db.has(block.id())); + /* + After trying duplication, check again. + */ + for (Block block : allBlocks) { + Assertions.assertTrue(db.has(block.id())); + } + + for (Block block : allBlocks) { + Assertions.assertTrue(allBlocks.contains(db.atHeight(block.getHeight()))); + } + for (Block block : allBlocks) { + Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); } db.closeDb(); FileUtils.deleteDirectory(new File(tempdir.toString())); } + /** + * Concurrent version of duplicationTest. + */ + @Test + void concurrentDuplicationTest() throws IOException { + int concurrencyDegree = 10; + + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(concurrencyDegree); + Thread[] addThreads = new Thread[concurrencyDegree]; + /* + Adding all blocks concurrently. 
+ */ + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + addThreads[i] = new Thread(() -> { + if (!db.add(allBlocks.get(finalI))) { + threadError.getAndIncrement(); + } + addDone.countDown(); + }); + } + for (Thread t : addThreads) { + t.start(); + } + try { + boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has. + */ + CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + hasThreads[i] = new Thread(() -> { + if (!db.has((allBlocks.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone.countDown(); + }); + } + + for (Thread t : hasThreads) { + t.start(); + } + try { + boolean doneOneTime = hasDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion byID. + */ + CountDownLatch getDone = new CountDownLatch(concurrencyDegree); + Thread[] getThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + getThreads[i] = new Thread(() -> { + if (!allBlocks.contains(db.byId(allBlocks.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone.countDown(); + }); + } + + for (Thread t : getThreads) { + t.start(); + } + try { + boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by atHeight. 
+ */ + CountDownLatch heightDone = new CountDownLatch(concurrencyDegree); + Thread[] heightThreats = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + heightThreats[i] = new Thread(() -> { + if (!allBlocks.contains(db.atHeight(allBlocks.get(finalI).getHeight()))) { + threadError.getAndIncrement(); + } + heightDone.countDown(); + }); + } + for (Thread t : heightThreats) { + t.start(); + } + try { + boolean doneOneTime = heightDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Retrieving all concurrently. + */ + CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); + Thread[] allThreads = new Thread[concurrencyDegree]; + ArrayList all = db.all(); + for (int i = 0; i < all.size(); i++) { + int finalI = i; + allThreads[i] = new Thread(() -> { + if (!all.contains(allBlocks.get(finalI))) { + threadError.getAndIncrement(); + } + doneAll.countDown(); + }); + } + + for (Thread t : allThreads) { + t.start(); + } + try { + boolean doneOneTime = doneAll.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Adding existing blocks + */ + CountDownLatch addDuplicateDone = new CountDownLatch(concurrencyDegree); + Thread[] addDuplicateThreads = new Thread[concurrencyDegree]; + /* + Adding all blocks concurrently. + */ + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + addDuplicateThreads[i] = new Thread(() -> { + if (db.add(allBlocks.get(finalI))) { + threadError.getAndIncrement(); + } + addDuplicateDone.countDown(); + }); + } + for (Thread t : addDuplicateThreads) { + t.start(); + } + try { + boolean doneOneTime = addDuplicateDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has after duplication. 
+ */ + CountDownLatch hasDone2 = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads2 = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + hasThreads2[i] = new Thread(() -> { + if (!db.has((allBlocks.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone2.countDown(); + }); + } + + for (Thread t : hasThreads2) { + t.start(); + } + try { + boolean doneOneTime = hasDone2.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion byID after duplication. + */ + CountDownLatch getDone2 = new CountDownLatch(concurrencyDegree); + Thread[] getThreads2 = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + getThreads2[i] = new Thread(() -> { + if (!allBlocks.contains(db.byId(allBlocks.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone2.countDown(); + }); + } + + for (Thread t : getThreads2) { + t.start(); + } + try { + boolean doneOneTime = getDone2.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by atHeight after duplication. + */ + CountDownLatch heightDone2 = new CountDownLatch(concurrencyDegree); + Thread[] heightThreats2 = new Thread[concurrencyDegree]; + for (int i = 0; i < allBlocks.size(); i++) { + int finalI = i; + heightThreats2[i] = new Thread(() -> { + if (!allBlocks.contains(db.atHeight(allBlocks.get(finalI).getHeight()))) { + threadError.getAndIncrement(); + } + heightDone2.countDown(); + }); + } + + for (Thread t : heightThreats2) { + t.start(); + } + try { + boolean doneOneTime = heightDone2.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Retrieving all concurrently after duplication. 
+ */ + CountDownLatch doneAll2 = new CountDownLatch(concurrencyDegree); + Thread[] allThreads2 = new Thread[concurrencyDegree]; + ArrayList all2 = db.all(); + for (int i = 0; i < all2.size(); i++) { + int finalI = i; + allThreads2[i] = new Thread(() -> { + if (!all2.contains(allBlocks.get(finalI))) { + threadError.getAndIncrement(); + } + doneAll2.countDown(); + }); + } + + for (Thread t : allThreads2) { + t.start(); + } + try { + boolean doneOneTime = doneAll2.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + Assertions.assertEquals(0, threadError.get()); + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } } diff --git a/src/test/java/storage/TransactionsTest.java b/src/test/java/storage/TransactionsTest.java index 43ce46a9..c3133b92 100644 --- a/src/test/java/storage/TransactionsTest.java +++ b/src/test/java/storage/TransactionsTest.java @@ -315,8 +315,8 @@ void concurrentRemoveFirstFiveTest() throws IOException { int finalI = i; int finalI1 = i + 5; getThreads[i] = new Thread(() -> { - if (!allTransactions.contains(db.get(allTransactions.get(finalI).id())) - || allTransactions.contains(db.get(allTransactions.get(finalI1).id()))) { + if (allTransactions.contains(db.get(allTransactions.get(finalI).id())) + || !allTransactions.contains(db.get(allTransactions.get(finalI1).id()))) { threadError.getAndIncrement(); } getDone.countDown(); @@ -332,12 +332,13 @@ void concurrentRemoveFirstFiveTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } + Assertions.assertEquals(0, threadError.get()); db.closeDb(); FileUtils.deleteDirectory(new File(tempdir.toString())); } /** - * Add 10 identifiers already exist and return false expected. + * Add 10 transaction already exist and return false expected. 
*/ @Test void duplicationTest() throws IOException { @@ -370,9 +371,11 @@ void duplicationTest() throws IOException { for (Transaction transaction : allTransactions) { Assertions.assertTrue(allTransactions.contains(db.get(transaction.id()))); } + db.closeDb(); FileUtils.deleteDirectory(new File(tempdir.toString())); } + /** * Concurrent version of duplicationTest. */ @@ -553,6 +556,7 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } + Assertions.assertEquals(0, threadError.get()); db.closeDb(); FileUtils.deleteDirectory(new File(tempdir.toString())); } From 433128cae9acc1d340ccaa3a3b14d5c91d725369 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 21 Apr 2022 21:36:56 +0300 Subject: [PATCH 10/31] Style modifications --- src/main/java/model/crypto/Signature.java | 5 +-- .../java/model/lightchain/Transaction.java | 4 +- src/main/java/storage/mapdb/BlocksMapDb.java | 42 +++++++++++-------- 3 files changed, 28 insertions(+), 23 deletions(-) diff --git a/src/main/java/model/crypto/Signature.java b/src/main/java/model/crypto/Signature.java index fa9bc1c7..dbeea7db 100644 --- a/src/main/java/model/crypto/Signature.java +++ b/src/main/java/model/crypto/Signature.java @@ -1,10 +1,10 @@ package model.crypto; +import java.io.Serializable; + import model.Entity; import model.lightchain.Identifier; -import java.io.Serializable; - /** * Represents abstract data type for the cryptographic digital signature used in LightChain. */ @@ -13,7 +13,6 @@ public abstract class Signature extends Entity implements Serializable { * The signature value in bytes. */ private final byte[] bytes; - /** * Identifier of node that signed transaction. 
*/ diff --git a/src/main/java/model/lightchain/Transaction.java b/src/main/java/model/lightchain/Transaction.java index afae00ec..902cc2a1 100644 --- a/src/main/java/model/lightchain/Transaction.java +++ b/src/main/java/model/lightchain/Transaction.java @@ -5,8 +5,6 @@ import model.codec.EntityType; import model.crypto.Signature; - - /** * Represents a LightChain transaction in form of a token transfer between a sender and receiver. */ @@ -76,7 +74,7 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof Transaction)) { + if (!(o instanceof Transaction)) { return false; } Transaction that = (Transaction) o; diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java index c9a57c50..be2620dd 100644 --- a/src/main/java/storage/mapdb/BlocksMapDb.java +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -1,6 +1,7 @@ package storage.mapdb; import java.util.ArrayList; +import java.util.Objects; import java.util.concurrent.locks.ReentrantReadWriteLock; import model.lightchain.Block; @@ -12,25 +13,29 @@ * Implementation of Transactions interface. */ public class BlocksMapDb implements Blocks { - private final DB dbID; + private final DB dbId; private final DB dbHeight; private final ReentrantReadWriteLock lock; private static final String MAP_NAME_ID = "blocks_map_id"; private static final String MAP_NAME_HEIGHT = "blocks_map_height"; private final HTreeMap blocksIdMap; - private final HTreeMap >blocksHeightMap; + private final HTreeMap> blocksHeightMap; - - public BlocksMapDb(String filePathId,String filePathHeight) { - this.dbID = DBMaker.fileDB(filePathId).make(); + /** + * Creates blocks mapdb. + * + * @param filePathId of id,block mapdb. + * @param filePathHeight of height,id mapdb. 
+ */ + public BlocksMapDb(String filePathId, String filePathHeight) { + this.dbId = DBMaker.fileDB(filePathId).make(); this.lock = new ReentrantReadWriteLock(); - blocksIdMap = this.dbID.hashMap(MAP_NAME_ID) + blocksIdMap = this.dbId.hashMap(MAP_NAME_ID) .keySerializer(Serializer.BYTE_ARRAY) .createOrOpen(); this.dbHeight = DBMaker.fileDB(filePathHeight).make(); blocksHeightMap = (HTreeMap>) this.dbHeight.hashMap(MAP_NAME_HEIGHT) .createOrOpen(); - } /** @@ -65,23 +70,22 @@ public boolean add(Block block) { try { lock.writeLock().lock(); addBooleanId = blocksIdMap.putIfAbsentBoolean(block.id().getBytes(), block); - if (addBooleanId){ - blocksHeightMap.compute(integer,(key,value)->{ + if (addBooleanId) { + blocksHeightMap.compute(integer, (key, value) -> { final ArrayList newBlockArray; - if(value == null){ + if (value == null) { newBlockArray = new ArrayList<>(); } else { newBlockArray = new ArrayList<>(value); } newBlockArray.add(block.id()); - return newBlockArray; + return newBlockArray; }); } } finally { lock.writeLock().unlock(); } return addBooleanId; - } /** @@ -98,8 +102,8 @@ public boolean remove(Identifier blockId) { lock.writeLock().lock(); Block block = byId(blockId); removeBoolean = blocksIdMap.remove(blockId.getBytes(), block); - if(removeBoolean){ - blocksHeightMap.get(block.getHeight()).remove(blockId); + if (removeBoolean) { + Objects.requireNonNull(blocksHeightMap.get(block.getHeight())).remove(blockId); } } finally { lock.writeLock().unlock(); @@ -110,6 +114,7 @@ public boolean remove(Identifier blockId) { /** * Returns the block with given identifier. * t + * * @param blockId identifier of the block. * @return the block itself if exists and null otherwise. 
*/ @@ -131,7 +136,7 @@ public Block byId(Identifier blockId) { @Override public Block atHeight(int height) { lock.readLock().lock(); - Identifier identifier = blocksHeightMap.get(height).get(0); + Identifier identifier = Objects.requireNonNull(blocksHeightMap.get(height)).get(0); Block block = byId(identifier); lock.readLock().unlock(); return block; @@ -150,9 +155,12 @@ public ArrayList all() { } return allBlocks; } + + /** + * Close the db. + */ public void closeDb() { - dbID.close(); + dbId.close(); dbHeight.close(); } - } From c385d5572489af59df54d98f6bcfa2c421746a50 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 21 Apr 2022 21:47:25 +0300 Subject: [PATCH 11/31] Style modifications --- src/main/java/storage/mapdb/BlocksMapDb.java | 14 +++++++------- .../java/storage/mapdb/TransactionsMapDb.java | 4 ++-- src/test/java/storage/BlocksTest.java | 18 +++++++++--------- src/test/java/storage/TransactionsTest.java | 12 ++++++------ 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java index be2620dd..a588a76e 100644 --- a/src/main/java/storage/mapdb/BlocksMapDb.java +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -19,7 +19,7 @@ public class BlocksMapDb implements Blocks { private static final String MAP_NAME_ID = "blocks_map_id"; private static final String MAP_NAME_HEIGHT = "blocks_map_height"; private final HTreeMap blocksIdMap; - private final HTreeMap> blocksHeightMap; + private final HTreeMap > blocksHeightMap; /** * Creates blocks mapdb. 
@@ -34,7 +34,7 @@ public BlocksMapDb(String filePathId, String filePathHeight) { .keySerializer(Serializer.BYTE_ARRAY) .createOrOpen(); this.dbHeight = DBMaker.fileDB(filePathHeight).make(); - blocksHeightMap = (HTreeMap>) this.dbHeight.hashMap(MAP_NAME_HEIGHT) + blocksHeightMap = (HTreeMap >) this.dbHeight.hashMap(MAP_NAME_HEIGHT) .createOrOpen(); } @@ -72,11 +72,11 @@ public boolean add(Block block) { addBooleanId = blocksIdMap.putIfAbsentBoolean(block.id().getBytes(), block); if (addBooleanId) { blocksHeightMap.compute(integer, (key, value) -> { - final ArrayList newBlockArray; + final ArrayList newBlockArray; if (value == null) { - newBlockArray = new ArrayList<>(); + newBlockArray = new ArrayList <>(); } else { - newBlockArray = new ArrayList<>(value); + newBlockArray = new ArrayList <>(value); } newBlockArray.add(block.id()); return newBlockArray; @@ -148,8 +148,8 @@ public Block atHeight(int height) { * @return all stored blocks in database. */ @Override - public ArrayList all() { - ArrayList allBlocks = new ArrayList<>(); + public ArrayList all() { + ArrayList allBlocks = new ArrayList <>(); for (Object block : blocksIdMap.values()) { allBlocks.add((Block) block); } diff --git a/src/main/java/storage/mapdb/TransactionsMapDb.java b/src/main/java/storage/mapdb/TransactionsMapDb.java index c10e8512..82c2316b 100644 --- a/src/main/java/storage/mapdb/TransactionsMapDb.java +++ b/src/main/java/storage/mapdb/TransactionsMapDb.java @@ -110,8 +110,8 @@ public Transaction get(Identifier transactionId) { * @return all transactions stored tranin database. 
*/ @Override - public ArrayList all() { - ArrayList allTransactions = new ArrayList<>(); + public ArrayList all() { + ArrayList allTransactions = new ArrayList <>(); for (Object transaction : transactionsMap.values()) { allTransactions.add((Transaction) transaction); } diff --git a/src/test/java/storage/BlocksTest.java b/src/test/java/storage/BlocksTest.java index 89dd6420..c49de3eb 100644 --- a/src/test/java/storage/BlocksTest.java +++ b/src/test/java/storage/BlocksTest.java @@ -27,7 +27,7 @@ public class BlocksTest { private static final String TEMP_FILE_ID = "tempfileID.db"; private static final String TEMP_FILE_HEIGHT = "tempfileHEIGHT.db"; private Path tempdir; - private ArrayList allBlocks; + private ArrayList allBlocks; private BlocksMapDb db; // TODO: implement a unit test for each of the following scenarios: // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a @@ -67,7 +67,7 @@ void setUp() throws IOException { Path currentRelativePath = Paths.get(""); tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); db = new BlocksMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID, tempdir.toAbsolutePath() + "/" + TEMP_FILE_HEIGHT); - allBlocks = new ArrayList<>(); + allBlocks = new ArrayList <>(); for (int i = 0; i < 10; i++) { allBlocks.add(BlockFixture.newBlock()); } @@ -92,7 +92,7 @@ void sequentialAddTest() throws IOException { for (Block block : allBlocks) { Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Block block : all) { Assertions.assertTrue(allBlocks.contains(block)); @@ -209,7 +209,7 @@ void concurrentAddTest() throws IOException { */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < all.size(); i++) { int finalI = i; 
allThreads[i] = new Thread(() -> { @@ -252,7 +252,7 @@ void removeFirstFiveTest() throws IOException { for (Block block : allBlocks) { Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Block block : all) { Assertions.assertTrue(allBlocks.contains(block)); @@ -378,7 +378,7 @@ void concurrentRemoveFirstFiveTest() throws IOException { */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { @@ -527,7 +527,7 @@ void duplicationTest() throws IOException { for (Block block : allBlocks) { Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Block block : all) { Assertions.assertTrue(allBlocks.contains(block)); @@ -660,7 +660,7 @@ void concurrentDuplicationTest() throws IOException { */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { @@ -783,7 +783,7 @@ void concurrentDuplicationTest() throws IOException { */ CountDownLatch doneAll2 = new CountDownLatch(concurrencyDegree); Thread[] allThreads2 = new Thread[concurrencyDegree]; - ArrayList all2 = db.all(); + ArrayList all2 = db.all(); for (int i = 0; i < all2.size(); i++) { int finalI = i; allThreads2[i] = new Thread(() -> { diff --git a/src/test/java/storage/TransactionsTest.java b/src/test/java/storage/TransactionsTest.java index c3133b92..94e0e9a4 100644 --- a/src/test/java/storage/TransactionsTest.java +++ b/src/test/java/storage/TransactionsTest.java @@ -26,7 +26,7 @@ public 
class TransactionsTest { private static final String TEMP_DIR = "tempdir"; private static final String TEMP_FILE = "tempfile.db"; private Path tempdir; - private ArrayList allTransactions; + private ArrayList allTransactions; private TransactionsMapDb db; // TODO: implement a unit test for each of the following scenarios: // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a @@ -64,7 +64,7 @@ void setUp() throws IOException { Path currentRelativePath = Paths.get(""); tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); db = new TransactionsMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE); - allTransactions = new ArrayList<>(); + allTransactions = new ArrayList <>(); for (int i = 0; i < 10; i++) { allTransactions.add(TransactionFixture.newTransaction(10)); } @@ -83,7 +83,7 @@ void sequentialAddTest() throws IOException { for (Transaction transaction : allTransactions) { Assertions.assertTrue(db.has(transaction.id())); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Transaction transaction : all) { Assertions.assertTrue(allTransactions.contains(transaction)); @@ -177,7 +177,7 @@ void concurrentAddTest() throws IOException { */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { @@ -351,7 +351,7 @@ void duplicationTest() throws IOException { for (Transaction transaction : allTransactions) { Assertions.assertTrue(db.has(transaction.id())); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Transaction transaction : all) { Assertions.assertTrue(allTransactions.contains(transaction)); @@ -461,7 +461,7 @@ void concurrentDuplicationTest() throws IOException { */ CountDownLatch doneAll = 
new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { From edd814f0c7502ef66d3be1acd912c0c4aa08e3e3 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 21 Apr 2022 21:57:49 +0300 Subject: [PATCH 12/31] Style modifications --- src/main/java/storage/mapdb/BlocksMapDb.java | 21 ++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java index a588a76e..7be9737a 100644 --- a/src/main/java/storage/mapdb/BlocksMapDb.java +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -103,7 +103,10 @@ public boolean remove(Identifier blockId) { Block block = byId(blockId); removeBoolean = blocksIdMap.remove(blockId.getBytes(), block); if (removeBoolean) { - Objects.requireNonNull(blocksHeightMap.get(block.getHeight())).remove(blockId); + ArrayList identifierArrayList = blocksHeightMap.get(block.getHeight()); + if (identifierArrayList != null) { + identifierArrayList.remove(blockId); + } } } finally { lock.writeLock().unlock(); @@ -122,7 +125,6 @@ public boolean remove(Identifier blockId) { public Block byId(Identifier blockId) { lock.readLock().lock(); Block block = (Block) blocksIdMap.get(blockId.getBytes()); - lock.readLock().unlock(); return block; } @@ -135,10 +137,17 @@ public Block byId(Identifier blockId) { */ @Override public Block atHeight(int height) { - lock.readLock().lock(); - Identifier identifier = Objects.requireNonNull(blocksHeightMap.get(height)).get(0); - Block block = byId(identifier); - lock.readLock().unlock(); + Block block = null; + try { + lock.readLock().lock(); + ArrayList identifierArrayList = blocksHeightMap.get(height); + if (identifierArrayList != null) { + Identifier identifier = identifierArrayList.get(0); + block = byId(identifier); + } + } finally { 
+ lock.readLock().unlock(); + } return block; } From 0054cc1d77655f71b7ad44992c071c8fa38f5114 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 21 Apr 2022 22:00:33 +0300 Subject: [PATCH 13/31] Style modifications --- src/main/java/storage/mapdb/BlocksMapDb.java | 18 +++++++++--------- src/test/java/storage/BlocksTest.java | 18 +++++++++--------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java index 7be9737a..87568088 100644 --- a/src/main/java/storage/mapdb/BlocksMapDb.java +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -19,7 +19,7 @@ public class BlocksMapDb implements Blocks { private static final String MAP_NAME_ID = "blocks_map_id"; private static final String MAP_NAME_HEIGHT = "blocks_map_height"; private final HTreeMap blocksIdMap; - private final HTreeMap > blocksHeightMap; + private final HTreeMap> blocksHeightMap; /** * Creates blocks mapdb. @@ -34,7 +34,7 @@ public BlocksMapDb(String filePathId, String filePathHeight) { .keySerializer(Serializer.BYTE_ARRAY) .createOrOpen(); this.dbHeight = DBMaker.fileDB(filePathHeight).make(); - blocksHeightMap = (HTreeMap >) this.dbHeight.hashMap(MAP_NAME_HEIGHT) + blocksHeightMap = (HTreeMap>) this.dbHeight.hashMap(MAP_NAME_HEIGHT) .createOrOpen(); } @@ -72,11 +72,11 @@ public boolean add(Block block) { addBooleanId = blocksIdMap.putIfAbsentBoolean(block.id().getBytes(), block); if (addBooleanId) { blocksHeightMap.compute(integer, (key, value) -> { - final ArrayList newBlockArray; + final ArrayList newBlockArray; if (value == null) { - newBlockArray = new ArrayList <>(); + newBlockArray = new ArrayList<>(); } else { - newBlockArray = new ArrayList <>(value); + newBlockArray = new ArrayList<>(value); } newBlockArray.add(block.id()); return newBlockArray; @@ -103,7 +103,7 @@ public boolean remove(Identifier blockId) { Block block = byId(blockId); removeBoolean = blocksIdMap.remove(blockId.getBytes(), 
block); if (removeBoolean) { - ArrayList identifierArrayList = blocksHeightMap.get(block.getHeight()); + ArrayList identifierArrayList = blocksHeightMap.get(block.getHeight()); if (identifierArrayList != null) { identifierArrayList.remove(blockId); } @@ -140,7 +140,7 @@ public Block atHeight(int height) { Block block = null; try { lock.readLock().lock(); - ArrayList identifierArrayList = blocksHeightMap.get(height); + ArrayList identifierArrayList = blocksHeightMap.get(height); if (identifierArrayList != null) { Identifier identifier = identifierArrayList.get(0); block = byId(identifier); @@ -157,8 +157,8 @@ public Block atHeight(int height) { * @return all stored blocks in database. */ @Override - public ArrayList all() { - ArrayList allBlocks = new ArrayList <>(); + public ArrayList all() { + ArrayList allBlocks = new ArrayList<>(); for (Object block : blocksIdMap.values()) { allBlocks.add((Block) block); } diff --git a/src/test/java/storage/BlocksTest.java b/src/test/java/storage/BlocksTest.java index c49de3eb..89dd6420 100644 --- a/src/test/java/storage/BlocksTest.java +++ b/src/test/java/storage/BlocksTest.java @@ -27,7 +27,7 @@ public class BlocksTest { private static final String TEMP_FILE_ID = "tempfileID.db"; private static final String TEMP_FILE_HEIGHT = "tempfileHEIGHT.db"; private Path tempdir; - private ArrayList allBlocks; + private ArrayList allBlocks; private BlocksMapDb db; // TODO: implement a unit test for each of the following scenarios: // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a @@ -67,7 +67,7 @@ void setUp() throws IOException { Path currentRelativePath = Paths.get(""); tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); db = new BlocksMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID, tempdir.toAbsolutePath() + "/" + TEMP_FILE_HEIGHT); - allBlocks = new ArrayList <>(); + allBlocks = new ArrayList<>(); for (int i = 0; i < 10; i++) { 
allBlocks.add(BlockFixture.newBlock()); } @@ -92,7 +92,7 @@ void sequentialAddTest() throws IOException { for (Block block : allBlocks) { Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Block block : all) { Assertions.assertTrue(allBlocks.contains(block)); @@ -209,7 +209,7 @@ void concurrentAddTest() throws IOException { */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { @@ -252,7 +252,7 @@ void removeFirstFiveTest() throws IOException { for (Block block : allBlocks) { Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Block block : all) { Assertions.assertTrue(allBlocks.contains(block)); @@ -378,7 +378,7 @@ void concurrentRemoveFirstFiveTest() throws IOException { */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { @@ -527,7 +527,7 @@ void duplicationTest() throws IOException { for (Block block : allBlocks) { Assertions.assertTrue(allBlocks.contains(db.byId(block.id()))); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Block block : all) { Assertions.assertTrue(allBlocks.contains(block)); @@ -660,7 +660,7 @@ void concurrentDuplicationTest() throws IOException { */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < 
all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { @@ -783,7 +783,7 @@ void concurrentDuplicationTest() throws IOException { */ CountDownLatch doneAll2 = new CountDownLatch(concurrencyDegree); Thread[] allThreads2 = new Thread[concurrencyDegree]; - ArrayList all2 = db.all(); + ArrayList all2 = db.all(); for (int i = 0; i < all2.size(); i++) { int finalI = i; allThreads2[i] = new Thread(() -> { From 800e427b145d1c1de31e24d3c6b62a9a1bf2728a Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 21 Apr 2022 23:57:43 +0300 Subject: [PATCH 14/31] Style modifications --- .../java/model/lightchain/ValidatedBlock.java | 10 ++++ .../lightchain/ValidatedTransaction.java | 10 ++++ src/test/java/storage/BlocksTest.java | 46 +++++++++---------- src/test/java/storage/TransactionsTest.java | 24 +++++----- 4 files changed, 55 insertions(+), 35 deletions(-) diff --git a/src/main/java/model/lightchain/ValidatedBlock.java b/src/main/java/model/lightchain/ValidatedBlock.java index c47d5ef6..7b8150f4 100644 --- a/src/main/java/model/lightchain/ValidatedBlock.java +++ b/src/main/java/model/lightchain/ValidatedBlock.java @@ -36,6 +36,16 @@ public Signature[] getCertificates() { return certificates.clone(); } + @Override + public int hashCode() { + return super.hashCode(); + } + + @Override + public boolean equals(Object o) { + return super.equals(o); + } + @Override public String type() { return EntityType.TYPE_VALIDATED_TRANSACTION; diff --git a/src/main/java/model/lightchain/ValidatedTransaction.java b/src/main/java/model/lightchain/ValidatedTransaction.java index 233a6faa..b28475be 100644 --- a/src/main/java/model/lightchain/ValidatedTransaction.java +++ b/src/main/java/model/lightchain/ValidatedTransaction.java @@ -38,6 +38,16 @@ public Signature[] getCertificates() { return certificates.clone(); } + @Override + public int hashCode() { + return super.hashCode(); + } + + @Override + public boolean equals(Object o) { + return super.equals(o); + } + @Override public 
String type() { return EntityType.TYPE_VALIDATED_TRANSACTION; diff --git a/src/test/java/storage/BlocksTest.java b/src/test/java/storage/BlocksTest.java index 89dd6420..ed0739cf 100644 --- a/src/test/java/storage/BlocksTest.java +++ b/src/test/java/storage/BlocksTest.java @@ -111,9 +111,9 @@ void concurrentAddTest() throws IOException { AtomicInteger threadError = new AtomicInteger(); CountDownLatch addDone = new CountDownLatch(concurrencyDegree); Thread[] addThreads = new Thread[concurrencyDegree]; - /* + /* Adding all blocks concurrently. - */ + */ for (int i = 0; i < allBlocks.size(); i++) { int finalI = i; addThreads[i] = new Thread(() -> { @@ -134,7 +134,7 @@ void concurrentAddTest() throws IOException { } /* Checking correctness of insertion by Has. - */ + */ CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); Thread[] hasThreads = new Thread[concurrencyDegree]; for (int i = 0; i < allBlocks.size(); i++) { @@ -156,9 +156,9 @@ void concurrentAddTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion byID. - */ + */ CountDownLatch getDone = new CountDownLatch(concurrencyDegree); Thread[] getThreads = new Thread[concurrencyDegree]; for (int i = 0; i < allBlocks.size(); i++) { @@ -180,9 +180,9 @@ void concurrentAddTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion by atHeight. - */ + */ CountDownLatch heightDone = new CountDownLatch(concurrencyDegree); Thread[] heightThreats = new Thread[concurrencyDegree]; for (int i = 0; i < allBlocks.size(); i++) { @@ -206,7 +206,7 @@ void concurrentAddTest() throws IOException { } /* Retrieving all concurrently. 
- */ + */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; ArrayList all = db.all(); @@ -281,9 +281,9 @@ void concurrentRemoveFirstFiveTest() throws IOException { AtomicInteger threadError = new AtomicInteger(); CountDownLatch addDone = new CountDownLatch(concurrencyDegree); Thread[] addThreads = new Thread[concurrencyDegree]; - /* + /* Adding all blocks concurrently. - */ + */ for (int i = 0; i < allBlocks.size(); i++) { int finalI = i; addThreads[i] = new Thread(() -> { @@ -304,7 +304,7 @@ void concurrentRemoveFirstFiveTest() throws IOException { } /* Checking correctness of insertion by Has. - */ + */ CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); Thread[] hasThreads = new Thread[concurrencyDegree]; for (int i = 0; i < allBlocks.size(); i++) { @@ -325,9 +325,9 @@ void concurrentRemoveFirstFiveTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion byID. - */ + */ CountDownLatch getDone = new CountDownLatch(concurrencyDegree); Thread[] getThreads = new Thread[concurrencyDegree]; for (int i = 0; i < allBlocks.size(); i++) { @@ -349,9 +349,9 @@ void concurrentRemoveFirstFiveTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion by atHeight. - */ + */ CountDownLatch heightDone = new CountDownLatch(concurrencyDegree); Thread[] heightThreats = new Thread[concurrencyDegree]; for (int i = 0; i < allBlocks.size(); i++) { @@ -375,7 +375,7 @@ void concurrentRemoveFirstFiveTest() throws IOException { } /* Retrieving all concurrently. 
- */ + */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; ArrayList all = db.all(); @@ -478,7 +478,7 @@ void concurrentRemoveFirstFiveTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Check atHeight after removing first five blocks */ CountDownLatch getByHeight = new CountDownLatch(concurrencyDegree / 2); @@ -562,7 +562,7 @@ void concurrentDuplicationTest() throws IOException { AtomicInteger threadError = new AtomicInteger(); CountDownLatch addDone = new CountDownLatch(concurrencyDegree); Thread[] addThreads = new Thread[concurrencyDegree]; - /* + /* Adding all blocks concurrently. */ for (int i = 0; i < allBlocks.size(); i++) { @@ -607,7 +607,7 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion byID. */ CountDownLatch getDone = new CountDownLatch(concurrencyDegree); @@ -631,7 +631,7 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion by atHeight. */ CountDownLatch heightDone = new CountDownLatch(concurrencyDegree); @@ -706,7 +706,7 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion by Has after duplication. */ CountDownLatch hasDone2 = new CountDownLatch(concurrencyDegree); @@ -730,7 +730,7 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion byID after duplication. */ CountDownLatch getDone2 = new CountDownLatch(concurrencyDegree); @@ -754,7 +754,7 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion by atHeight after duplication. 
*/ CountDownLatch heightDone2 = new CountDownLatch(concurrencyDegree); diff --git a/src/test/java/storage/TransactionsTest.java b/src/test/java/storage/TransactionsTest.java index 94e0e9a4..84ca5790 100644 --- a/src/test/java/storage/TransactionsTest.java +++ b/src/test/java/storage/TransactionsTest.java @@ -26,7 +26,7 @@ public class TransactionsTest { private static final String TEMP_DIR = "tempdir"; private static final String TEMP_FILE = "tempfile.db"; private Path tempdir; - private ArrayList allTransactions; + private ArrayList allTransactions; private TransactionsMapDb db; // TODO: implement a unit test for each of the following scenarios: // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a @@ -64,7 +64,7 @@ void setUp() throws IOException { Path currentRelativePath = Paths.get(""); tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); db = new TransactionsMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE); - allTransactions = new ArrayList <>(); + allTransactions = new ArrayList<>(); for (int i = 0; i < 10; i++) { allTransactions.add(TransactionFixture.newTransaction(10)); } @@ -83,7 +83,7 @@ void sequentialAddTest() throws IOException { for (Transaction transaction : allTransactions) { Assertions.assertTrue(db.has(transaction.id())); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Transaction transaction : all) { Assertions.assertTrue(allTransactions.contains(transaction)); @@ -148,7 +148,7 @@ void concurrentAddTest() throws IOException { Assertions.fail(); } - /* + /* Checking correctness of insertion by GET. 
*/ CountDownLatch getDone = new CountDownLatch(concurrencyDegree); @@ -177,7 +177,7 @@ void concurrentAddTest() throws IOException { */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { @@ -342,8 +342,8 @@ void concurrentRemoveFirstFiveTest() throws IOException { */ @Test void duplicationTest() throws IOException { - /* - Firt part of the test + /* + Firt part of the test */ for (Transaction transaction : allTransactions) { Assertions.assertTrue(db.add(transaction)); @@ -351,7 +351,7 @@ void duplicationTest() throws IOException { for (Transaction transaction : allTransactions) { Assertions.assertTrue(db.has(transaction.id())); } - ArrayList all = db.all(); + ArrayList all = db.all(); Assertions.assertEquals(all.size(), 10); for (Transaction transaction : all) { Assertions.assertTrue(allTransactions.contains(transaction)); @@ -386,7 +386,7 @@ void concurrentDuplicationTest() throws IOException { AtomicInteger threadError = new AtomicInteger(); CountDownLatch addDone = new CountDownLatch(concurrencyDegree); Thread[] addThreads = new Thread[concurrencyDegree]; - /* + /* Adding all transactions concurrently. */ for (int i = 0; i < allTransactions.size(); i++) { @@ -432,7 +432,7 @@ void concurrentDuplicationTest() throws IOException { Assertions.fail(); } - /* + /* Checking correctness of insertion by Get. 
*/ CountDownLatch getDone = new CountDownLatch(concurrencyDegree); @@ -461,7 +461,7 @@ void concurrentDuplicationTest() throws IOException { */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); + ArrayList all = db.all(); for (int i = 0; i < all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { @@ -532,7 +532,7 @@ void concurrentDuplicationTest() throws IOException { Assertions.fail(); } - /* + /* Checking correctness of insertion by Get again. */ CountDownLatch getDone2 = new CountDownLatch(concurrencyDegree); From 18f49a6d453074a512bf2e613c4eb66099748e2d Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Fri, 22 Apr 2022 00:03:34 +0300 Subject: [PATCH 15/31] Style modifications --- .../model/crypto/ecdsa/EcdsaSignature.java | 3 ++- .../lightchain/ValidatedTransaction.java | 3 ++- .../java/storage/mapdb/TransactionsMapDb.java | 4 +-- src/test/java/storage/BlocksTest.java | 27 ++++++++++--------- 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/src/main/java/model/crypto/ecdsa/EcdsaSignature.java b/src/main/java/model/crypto/ecdsa/EcdsaSignature.java index a1bc1b77..3596820a 100644 --- a/src/main/java/model/crypto/ecdsa/EcdsaSignature.java +++ b/src/main/java/model/crypto/ecdsa/EcdsaSignature.java @@ -1,10 +1,11 @@ package model.crypto.ecdsa; +import java.io.Serializable; + import model.codec.EntityType; import model.crypto.Signature; import model.lightchain.Identifier; -import java.io.Serializable; /** * ECDSA signature implementation with signer ID. 
diff --git a/src/main/java/model/lightchain/ValidatedTransaction.java b/src/main/java/model/lightchain/ValidatedTransaction.java index b28475be..b57b3327 100644 --- a/src/main/java/model/lightchain/ValidatedTransaction.java +++ b/src/main/java/model/lightchain/ValidatedTransaction.java @@ -1,9 +1,10 @@ package model.lightchain; +import java.io.Serializable; + import model.codec.EntityType; import model.crypto.Signature; -import java.io.Serializable; /** * A ValidatedTransaction is a wrapper around a Transaction that carries a proof of assigned validators that attests diff --git a/src/main/java/storage/mapdb/TransactionsMapDb.java b/src/main/java/storage/mapdb/TransactionsMapDb.java index 82c2316b..c10e8512 100644 --- a/src/main/java/storage/mapdb/TransactionsMapDb.java +++ b/src/main/java/storage/mapdb/TransactionsMapDb.java @@ -110,8 +110,8 @@ public Transaction get(Identifier transactionId) { * @return all transactions stored tranin database. */ @Override - public ArrayList all() { - ArrayList allTransactions = new ArrayList <>(); + public ArrayList all() { + ArrayList allTransactions = new ArrayList<>(); for (Object transaction : transactionsMap.values()) { allTransactions.add((Transaction) transaction); } diff --git a/src/test/java/storage/BlocksTest.java b/src/test/java/storage/BlocksTest.java index ed0739cf..ebb22c95 100644 --- a/src/test/java/storage/BlocksTest.java +++ b/src/test/java/storage/BlocksTest.java @@ -1,13 +1,5 @@ package storage; -import model.lightchain.Block; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.testcontainers.shaded.org.apache.commons.io.FileUtils; -import storage.mapdb.BlocksMapDb; -import unittest.fixtures.BlockFixture; - import java.io.File; import java.io.IOException; import java.nio.file.Files; @@ -18,6 +10,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import model.lightchain.Block; +import 
org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.testcontainers.shaded.org.apache.commons.io.FileUtils; +import storage.mapdb.BlocksMapDb; +import unittest.fixtures.BlockFixture; + + /** * Encapsulates tests for block database. */ @@ -66,7 +67,8 @@ public class BlocksTest { void setUp() throws IOException { Path currentRelativePath = Paths.get(""); tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); - db = new BlocksMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID, tempdir.toAbsolutePath() + "/" + TEMP_FILE_HEIGHT); + db = new BlocksMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID, + tempdir.toAbsolutePath() + "/" + TEMP_FILE_HEIGHT); allBlocks = new ArrayList<>(); for (int i = 0; i < 10; i++) { allBlocks.add(BlockFixture.newBlock()); @@ -265,7 +267,8 @@ void removeFirstFiveTest() throws IOException { Assertions.assertFalse(db.has(allBlocks.get(i).id()) || db.all().contains(allBlocks.get(i))); } else { Assertions.assertTrue(db.has(allBlocks.get(i).id()) && db.all().contains(allBlocks.get(i)) - && db.all().contains(db.atHeight(allBlocks.get(i).getHeight())) && db.all().contains(db.byId(allBlocks.get(i).id()))); + && db.all().contains(db.atHeight(allBlocks.get(i).getHeight())) + && db.all().contains(db.byId(allBlocks.get(i).id()))); } } db.closeDb(); @@ -657,7 +660,7 @@ void concurrentDuplicationTest() throws IOException { } /* Retrieving all concurrently. - */ + */ CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; ArrayList all = db.all(); @@ -685,7 +688,7 @@ void concurrentDuplicationTest() throws IOException { */ CountDownLatch addDuplicateDone = new CountDownLatch(concurrencyDegree); Thread[] addDuplicateThreads = new Thread[concurrencyDegree]; - /* + /* Adding all blocks concurrently. 
*/ for (int i = 0; i < allBlocks.size(); i++) { From 24d372747f351326c64b7e4d37385694876c16f9 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Fri, 22 Apr 2022 00:08:22 +0300 Subject: [PATCH 16/31] Style modifications --- .../java/model/crypto/ecdsa/EcdsaSignature.java | 3 +-- .../model/lightchain/ValidatedTransaction.java | 1 - src/main/java/storage/mapdb/BlocksMapDb.java | 1 - src/test/java/storage/BlocksTest.java | 1 - src/test/java/storage/TransactionsTest.java | 16 ++++++++-------- 5 files changed, 9 insertions(+), 13 deletions(-) diff --git a/src/main/java/model/crypto/ecdsa/EcdsaSignature.java b/src/main/java/model/crypto/ecdsa/EcdsaSignature.java index 3596820a..17aa7f98 100644 --- a/src/main/java/model/crypto/ecdsa/EcdsaSignature.java +++ b/src/main/java/model/crypto/ecdsa/EcdsaSignature.java @@ -6,8 +6,7 @@ import model.crypto.Signature; import model.lightchain.Identifier; - -/** + /** * ECDSA signature implementation with signer ID. */ public class EcdsaSignature extends Signature implements Serializable { diff --git a/src/main/java/model/lightchain/ValidatedTransaction.java b/src/main/java/model/lightchain/ValidatedTransaction.java index b57b3327..3660b9e8 100644 --- a/src/main/java/model/lightchain/ValidatedTransaction.java +++ b/src/main/java/model/lightchain/ValidatedTransaction.java @@ -5,7 +5,6 @@ import model.codec.EntityType; import model.crypto.Signature; - /** * A ValidatedTransaction is a wrapper around a Transaction that carries a proof of assigned validators that attests * the transaction passed local validation of validators. 
diff --git a/src/main/java/storage/mapdb/BlocksMapDb.java b/src/main/java/storage/mapdb/BlocksMapDb.java index 87568088..f9ca8d50 100644 --- a/src/main/java/storage/mapdb/BlocksMapDb.java +++ b/src/main/java/storage/mapdb/BlocksMapDb.java @@ -1,7 +1,6 @@ package storage.mapdb; import java.util.ArrayList; -import java.util.Objects; import java.util.concurrent.locks.ReentrantReadWriteLock; import model.lightchain.Block; diff --git a/src/test/java/storage/BlocksTest.java b/src/test/java/storage/BlocksTest.java index ebb22c95..70f347fb 100644 --- a/src/test/java/storage/BlocksTest.java +++ b/src/test/java/storage/BlocksTest.java @@ -18,7 +18,6 @@ import storage.mapdb.BlocksMapDb; import unittest.fixtures.BlockFixture; - /** * Encapsulates tests for block database. */ diff --git a/src/test/java/storage/TransactionsTest.java b/src/test/java/storage/TransactionsTest.java index 84ca5790..37658c2a 100644 --- a/src/test/java/storage/TransactionsTest.java +++ b/src/test/java/storage/TransactionsTest.java @@ -102,7 +102,7 @@ void concurrentAddTest() throws IOException { AtomicInteger threadError = new AtomicInteger(); CountDownLatch addDone = new CountDownLatch(concurrencyDegree); Thread[] addThreads = new Thread[concurrencyDegree]; - /* + /* Adding all transactions concurrently. */ for (int i = 0; i < allTransactions.size(); i++) { @@ -123,7 +123,7 @@ void concurrentAddTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion by Has. */ CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); @@ -235,7 +235,7 @@ void concurrentRemoveFirstFiveTest() throws IOException { AtomicInteger threadError = new AtomicInteger(); CountDownLatch addDone = new CountDownLatch(concurrencyDegree); Thread[] addThreads = new Thread[concurrencyDegree]; - /* + /* Adding all transactions concurrently. 
*/ for (int i = 0; i < allTransactions.size(); i++) { @@ -256,7 +256,7 @@ void concurrentRemoveFirstFiveTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Removing first 5 concurrently */ int removeTill = concurrencyDegree / 2; @@ -362,8 +362,8 @@ void duplicationTest() throws IOException { for (Transaction transaction : allTransactions) { Assertions.assertFalse(db.add(transaction)); } - /* - After trying duplication, check again. + /* + After trying duplication, check again. */ for (Transaction transaction : allTransactions) { Assertions.assertTrue(db.has(transaction.id())); @@ -407,7 +407,7 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* + /* Checking correctness of insertion by Has. */ CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); @@ -486,7 +486,7 @@ void concurrentDuplicationTest() throws IOException { */ CountDownLatch addDuplicateDone = new CountDownLatch(concurrencyDegree); Thread[] addDuplicateThreads = new Thread[concurrencyDegree]; - /* + /* Adding all transactions concurrently. */ for (int i = 0; i < allTransactions.size(); i++) { From 6c1d8cdeb44620d05186b8200d2e92ef8ebf0e45 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Fri, 22 Apr 2022 00:09:52 +0300 Subject: [PATCH 17/31] Style modifications --- src/main/java/model/crypto/ecdsa/EcdsaSignature.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/model/crypto/ecdsa/EcdsaSignature.java b/src/main/java/model/crypto/ecdsa/EcdsaSignature.java index 17aa7f98..8623d61b 100644 --- a/src/main/java/model/crypto/ecdsa/EcdsaSignature.java +++ b/src/main/java/model/crypto/ecdsa/EcdsaSignature.java @@ -6,7 +6,7 @@ import model.crypto.Signature; import model.lightchain.Identifier; - /** +/** * ECDSA signature implementation with signer ID. 
*/ public class EcdsaSignature extends Signature implements Serializable { From 3c9ec3479b717d53eac66e73babf1fa93bac7cc4 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Fri, 22 Apr 2022 01:31:48 +0300 Subject: [PATCH 18/31] Merge branch 'master' into abdullah/distributed and some tests implemented --- src/main/java/model/codec/EncodedEntity.java | 14 ++-- src/main/java/model/crypto/Signature.java | 4 +- src/main/java/storage/mapdb/Distributed.java | 20 +++++- src/test/java/storage/DistributedTest.java | 74 +++++++++++++++++++- 4 files changed, 100 insertions(+), 12 deletions(-) diff --git a/src/main/java/model/codec/EncodedEntity.java b/src/main/java/model/codec/EncodedEntity.java index 1b96a92b..745cccce 100644 --- a/src/main/java/model/codec/EncodedEntity.java +++ b/src/main/java/model/codec/EncodedEntity.java @@ -10,6 +10,13 @@ public class EncodedEntity implements Serializable { private final byte[] bytes; private final String type; + + + // EncodedEntity(id.getBytes() || byte(i), "assignment") + public EncodedEntity(byte[] bytes, String type) { + this.bytes = bytes.clone(); + this.type = type; + } /** * Hashcode of entity. * @@ -37,13 +44,6 @@ public boolean equals(Object o) { EncodedEntity that = (EncodedEntity) o; return Arrays.equals(this.bytes, that.bytes); } - - // EncodedEntity(id.getBytes() || byte(i), "assignment") - public EncodedEntity(byte[] bytes, String type) { - this.bytes = bytes.clone(); - this.type = type; - } - public byte[] getBytes() { return bytes.clone(); } diff --git a/src/main/java/model/crypto/Signature.java b/src/main/java/model/crypto/Signature.java index dbeea7db..cabf32ef 100644 --- a/src/main/java/model/crypto/Signature.java +++ b/src/main/java/model/crypto/Signature.java @@ -8,7 +8,7 @@ /** * Represents abstract data type for the cryptographic digital signature used in LightChain. 
*/ -public abstract class Signature extends Entity implements Serializable { +public abstract class Signature extends Entity { /** * The signature value in bytes. */ @@ -30,4 +30,6 @@ public Identifier getSignerId() { public byte[] getBytes() { return bytes.clone(); } + + } diff --git a/src/main/java/storage/mapdb/Distributed.java b/src/main/java/storage/mapdb/Distributed.java index 90915fca..7f441515 100644 --- a/src/main/java/storage/mapdb/Distributed.java +++ b/src/main/java/storage/mapdb/Distributed.java @@ -10,9 +10,10 @@ import org.mapdb.DB; import org.mapdb.DBMaker; import org.mapdb.HTreeMap; +import org.mapdb.Serializer; public class Distributed implements storage.Distributed { - private final JsonEncoder encoder = new JsonEncoder(); + private final DB db; private final ReentrantReadWriteLock lock; private static final String MAP_NAME = "distributed_map"; @@ -25,6 +26,7 @@ public Distributed(String filePath) { this.db = DBMaker.fileDB(filePath).make(); this.lock = new ReentrantReadWriteLock(); distributedMap = this.db.hashMap(MAP_NAME) + .keySerializer(Serializer.BYTE_ARRAY) .createOrOpen(); } @@ -55,6 +57,7 @@ public boolean has(Identifier entityId) { */ @Override public boolean add(Entity e) { + JsonEncoder encoder = new JsonEncoder(); boolean addBoolean; try { lock.writeLock().lock(); @@ -74,6 +77,7 @@ public boolean add(Entity e) { */ @Override public boolean remove(Entity e) { + JsonEncoder encoder = new JsonEncoder(); boolean removeBoolean; try { lock.writeLock().lock(); @@ -92,12 +96,14 @@ public boolean remove(Entity e) { */ @Override public Entity get(Identifier entityId) { - Entity decodedEntity = null; + + Entity decodedEntity=null; try { + JsonEncoder encoder = new JsonEncoder(); lock.readLock().lock(); EncodedEntity encodedEntity = (EncodedEntity) distributedMap.get(entityId.getBytes()); - assert encodedEntity != null; + System.out.println(encodedEntity.getType()); decodedEntity = encoder.decode(encodedEntity); } catch (ClassNotFoundException 
e) { //throw new ClassNotFoundException("could not found the class"+e); @@ -114,6 +120,7 @@ public Entity get(Identifier entityId) { */ @Override public ArrayList all() { + JsonEncoder encoder = new JsonEncoder(); ArrayList allEntities = new ArrayList<>(); for (Object encodedEntity : distributedMap.values()) { try { @@ -124,4 +131,11 @@ public ArrayList all() { } return allEntities; } + + /** + * It closes the database. + */ + public void closeDb() { + db.close(); + } } diff --git a/src/test/java/storage/DistributedTest.java b/src/test/java/storage/DistributedTest.java index 8ce62150..f94b7aef 100644 --- a/src/test/java/storage/DistributedTest.java +++ b/src/test/java/storage/DistributedTest.java @@ -1,9 +1,34 @@ package storage; +import model.Entity; +import model.lightchain.Block; +import modules.codec.JsonEncoder; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.testcontainers.shaded.org.apache.commons.io.FileUtils; +import storage.mapdb.BlocksMapDb; +import storage.mapdb.Distributed; +import unittest.fixtures.BlockFixture; +import unittest.fixtures.TransactionFixture; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; + /** * Encapsulates tests for distributed storage. */ public class DistributedTest { + + private static final String TEMP_DIR = "tempdir"; + private static final String TEMP_FILE_ID = "tempfileID.db"; + private Path tempdir; + private ArrayList allEntities; + private Distributed db; // TODO: implement a unit test for each of the following scenarios: // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a // temporary directory. 
@@ -36,5 +61,52 @@ public class DistributedTest { // Add should return false for each of them, while has should still return true, and get should be // able to retrieve the entity. // 6. Repeat test case 5 for concurrently adding entities as well as concurrently querying the - // database for has, get. + // database for has, get. + + /** + * Set the tests up. + */ + @BeforeEach + void setUp() throws IOException { + Path currentRelativePath = Paths.get(""); + tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); + db = new Distributed(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID); + allEntities = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + allEntities.add(BlockFixture.newBlock(10)); + } + for (int i = 0; i < 10; i++) { + allEntities.add(TransactionFixture.newTransaction(10)); + } + } + + /** + * Adding entities sequentially. + * + * @throws IOException throw IOException. + */ + @Test + void sequentialAddTest() throws IOException, ClassNotFoundException { + for (Entity entity : allEntities){ + Assertions.assertTrue(db.add(entity)); + } + for (Entity entity : allEntities){ + Assertions.assertTrue(db.has(entity.id())); + } + JsonEncoder encoder = new JsonEncoder(); + Entity entityx =BlockFixture.newBlock(); + System.out.println(encoder.decode(encoder.encode(entityx))); + + for (Entity entity : allEntities){ + Assertions.assertTrue(allEntities.contains(db.get(entity.id()))); + } + ArrayList all = db.all(); + Assertions.assertEquals(all.size(),20); + for (Entity entity : allEntities){ + Assertions.assertTrue(all.contains(entity)); + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + } From fc0dd87234a3f19295affe940bdbe9bb3d1c0c28 Mon Sep 17 00:00:00 2001 From: Ozan Date: Mon, 25 Apr 2022 12:42:26 +0300 Subject: [PATCH 19/31] resolves decoding issue of Signature, JsonEncoder is updated --- src/main/java/model/crypto/Hash.java | 4 ++- src/main/java/model/crypto/Sha3256Hash.java | 3 +- 
src/main/java/model/crypto/Signature.java | 2 +- src/main/java/modules/codec/JsonEncoder.java | 32 +++++++++++++++---- src/test/java/modules/JsonEncoderTest.java | 2 +- src/test/java/storage/DistributedTest.java | 2 +- .../java/unittest/fixtures/EntityFixture.java | 3 +- 7 files changed, 35 insertions(+), 13 deletions(-) diff --git a/src/main/java/model/crypto/Hash.java b/src/main/java/model/crypto/Hash.java index 2a02985a..d4e402e0 100644 --- a/src/main/java/model/crypto/Hash.java +++ b/src/main/java/model/crypto/Hash.java @@ -2,10 +2,12 @@ import model.lightchain.Identifier; +import java.io.Serializable; + /** * Represents abstract data type for the cryptographic hash function used in LightChain. */ -public abstract class Hash { +public abstract class Hash implements Serializable { /** * Actual value of hash in bytes. */ diff --git a/src/main/java/model/crypto/Sha3256Hash.java b/src/main/java/model/crypto/Sha3256Hash.java index 7943a858..ffe50277 100644 --- a/src/main/java/model/crypto/Sha3256Hash.java +++ b/src/main/java/model/crypto/Sha3256Hash.java @@ -1,5 +1,6 @@ package model.crypto; +import java.io.Serializable; import java.util.Arrays; import model.lightchain.Identifier; @@ -8,7 +9,7 @@ * Represents SHA3-256 data type which extends abstract Hash data type for * the cryptographic hash function used in LightChain. */ -public class Sha3256Hash extends Hash { +public class Sha3256Hash extends Hash implements Serializable { public static final int Size = 32; private final byte[] hashBytes; diff --git a/src/main/java/model/crypto/Signature.java b/src/main/java/model/crypto/Signature.java index cabf32ef..fdbc3671 100644 --- a/src/main/java/model/crypto/Signature.java +++ b/src/main/java/model/crypto/Signature.java @@ -8,7 +8,7 @@ /** * Represents abstract data type for the cryptographic digital signature used in LightChain. 
*/ -public abstract class Signature extends Entity { +public abstract class Signature extends Entity implements Serializable{ /** * The signature value in bytes. */ diff --git a/src/main/java/modules/codec/JsonEncoder.java b/src/main/java/modules/codec/JsonEncoder.java index d5d3191f..392a45a0 100644 --- a/src/main/java/modules/codec/JsonEncoder.java +++ b/src/main/java/modules/codec/JsonEncoder.java @@ -1,5 +1,6 @@ package modules.codec; +import java.io.*; import java.lang.reflect.Type; import java.nio.charset.StandardCharsets; @@ -10,7 +11,7 @@ /** * Implements encoding and decoding using JSON. */ -public class JsonEncoder implements Codec { +public class JsonEncoder implements Codec, Serializable { /** * Encodes an Entity to an EncodedEntity. * @@ -19,8 +20,17 @@ public class JsonEncoder implements Codec { */ @Override public EncodedEntity encode(Entity e) { - Gson gson = new Gson(); - byte[] bytes = gson.toJson(e).getBytes(StandardCharsets.UTF_8); + byte[] bytes = new byte[0]; + try { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ObjectOutputStream out = null; + out = new ObjectOutputStream(bos); + out.writeObject(e); + out.flush(); + bytes = bos.toByteArray(); + } catch (IOException ex) { + ex.printStackTrace(); + } String type = e.getClass().getCanonicalName(); return new EncodedEntity(bytes, type); } @@ -33,9 +43,17 @@ public EncodedEntity encode(Entity e) { */ @Override public Entity decode(EncodedEntity e) throws ClassNotFoundException { - Gson gson = new Gson(); - String json = new String(e.getBytes().clone(), StandardCharsets.UTF_8); - return gson.fromJson(json, (Type) Class.forName(e.getType())); - + Entity entity = null; + try { + ByteArrayInputStream bis = new ByteArrayInputStream(e.getBytes().clone()); + ObjectInputStream inp = null; + inp = new ObjectInputStream(bis); + entity = (Entity) (Class.forName(e.getType())).cast(inp.readObject()); + } catch (IOException ex) { + ex.printStackTrace(); + } catch (ClassNotFoundException ex) { + 
throw new RuntimeException(ex); + } + return entity; } } diff --git a/src/test/java/modules/JsonEncoderTest.java b/src/test/java/modules/JsonEncoderTest.java index 0fab33e6..fee08b52 100644 --- a/src/test/java/modules/JsonEncoderTest.java +++ b/src/test/java/modules/JsonEncoderTest.java @@ -17,7 +17,7 @@ public class JsonEncoderTest { @Test public void testEncodingRoundTrip() throws ClassNotFoundException { JsonEncoder encoder = new JsonEncoder(); - EntityFixture entity = new EntityFixture(); + Entity entity = new EntityFixture(); Entity entityChanged = encoder.decode(encoder.encode(entity)); Assertions.assertEquals(entity, entityChanged); System.out.println("Entities are equal: " + entity.equals(entityChanged)); diff --git a/src/test/java/storage/DistributedTest.java b/src/test/java/storage/DistributedTest.java index f94b7aef..29ed7bc4 100644 --- a/src/test/java/storage/DistributedTest.java +++ b/src/test/java/storage/DistributedTest.java @@ -94,7 +94,7 @@ void sequentialAddTest() throws IOException, ClassNotFoundException { Assertions.assertTrue(db.has(entity.id())); } JsonEncoder encoder = new JsonEncoder(); - Entity entityx =BlockFixture.newBlock(); + Entity entityx = BlockFixture.newBlock(); System.out.println(encoder.decode(encoder.encode(entityx))); for (Entity entity : allEntities){ diff --git a/src/test/java/unittest/fixtures/EntityFixture.java b/src/test/java/unittest/fixtures/EntityFixture.java index 792a7658..554f7dbe 100644 --- a/src/test/java/unittest/fixtures/EntityFixture.java +++ b/src/test/java/unittest/fixtures/EntityFixture.java @@ -1,5 +1,6 @@ package unittest.fixtures; +import java.io.Serializable; import java.util.Arrays; import java.util.Objects; import java.util.Random; @@ -11,7 +12,7 @@ /** * Encapsulates test utilities for a LightChain entity. 
*/ -public class EntityFixture extends Entity { +public class EntityFixture extends Entity implements Serializable { private static final String TYPE_FIXTURE_ENTITY = "fixture-entity-type"; private static final Random rand = new Random(); private final Identifier id; From e01c67654c7fd61f80822aa65b2d6f5821f39078 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 28 Apr 2022 15:53:26 +0300 Subject: [PATCH 20/31] AddTest is implemented --- src/main/java/storage/mapdb/Distributed.java | 2 +- src/test/java/storage/DistributedTest.java | 123 +++++++++++++++++-- 2 files changed, 115 insertions(+), 10 deletions(-) diff --git a/src/main/java/storage/mapdb/Distributed.java b/src/main/java/storage/mapdb/Distributed.java index 7f441515..67a1d8ed 100644 --- a/src/main/java/storage/mapdb/Distributed.java +++ b/src/main/java/storage/mapdb/Distributed.java @@ -103,7 +103,7 @@ public Entity get(Identifier entityId) { JsonEncoder encoder = new JsonEncoder(); lock.readLock().lock(); EncodedEntity encodedEntity = (EncodedEntity) distributedMap.get(entityId.getBytes()); - System.out.println(encodedEntity.getType()); + assert encodedEntity != null; decodedEntity = encoder.decode(encodedEntity); } catch (ClassNotFoundException e) { //throw new ClassNotFoundException("could not found the class"+e); diff --git a/src/test/java/storage/DistributedTest.java b/src/test/java/storage/DistributedTest.java index 29ed7bc4..b02efbaa 100644 --- a/src/test/java/storage/DistributedTest.java +++ b/src/test/java/storage/DistributedTest.java @@ -18,6 +18,9 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; /** * Encapsulates tests for distributed storage. 
@@ -87,26 +90,128 @@ void setUp() throws IOException { */ @Test void sequentialAddTest() throws IOException, ClassNotFoundException { - for (Entity entity : allEntities){ + for (Entity entity : allEntities) { Assertions.assertTrue(db.add(entity)); } - for (Entity entity : allEntities){ + for (Entity entity : allEntities) { Assertions.assertTrue(db.has(entity.id())); } - JsonEncoder encoder = new JsonEncoder(); - Entity entityx = BlockFixture.newBlock(); - System.out.println(encoder.decode(encoder.encode(entityx))); - - for (Entity entity : allEntities){ + for (Entity entity : allEntities) { Assertions.assertTrue(allEntities.contains(db.get(entity.id()))); } ArrayList all = db.all(); - Assertions.assertEquals(all.size(),20); - for (Entity entity : allEntities){ + Assertions.assertEquals(all.size(), 20); + for (Entity entity : allEntities) { Assertions.assertTrue(all.contains(entity)); } db.closeDb(); FileUtils.deleteDirectory(new File(tempdir.toString())); } + /** + * Concurrent version of adding entities. + */ + @Test + void concurrentAddTest() throws IOException { + int concurrencyDegree = 20; + + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(concurrencyDegree); + Thread[] addThreads = new Thread[concurrencyDegree]; + /* + Adding all transactions concurrently. + */ + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + addThreads[i] = new Thread(() -> { + if (!db.add(allEntities.get(finalI))) { + threadError.getAndIncrement(); + } + addDone.countDown(); + }); + } + for (Thread t : addThreads) { + t.start(); + } + try { + boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has. 
+ */ + CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + hasThreads[i] = new Thread(() -> { + if (!db.has((allEntities.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone.countDown(); + }); + } + + for (Thread t : hasThreads) { + t.start(); + } + try { + boolean doneOneTime = hasDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by GET. + */ + CountDownLatch getDone = new CountDownLatch(concurrencyDegree); + Thread[] getThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + getThreads[i] = new Thread(() -> { + if (!allEntities.contains(db.get(allEntities.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone.countDown(); + }); + } + + for (Thread t : getThreads) { + t.start(); + } + try { + boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); + Thread[] allThreads = new Thread[concurrencyDegree]; + ArrayList all = db.all(); + for (int i = 0; i < all.size(); i++) { + int finalI = i; + allThreads[i] = new Thread(() -> { + if (!all.contains(allEntities.get(finalI))) { + threadError.getAndIncrement(); + } + doneAll.countDown(); + }); + } + + for (Thread t : allThreads) { + t.start(); + } + try { + boolean doneOneTime = doneAll.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + } From ee23170294db5617e0681be3d3216e79a46fc8d2 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 28 Apr 2022 18:20:17 
+0300 Subject: [PATCH 21/31] DistributedTest is implemented --- src/main/java/storage/mapdb/Distributed.java | 6 +- src/test/java/storage/DistributedTest.java | 414 ++++++++++++++++++- 2 files changed, 401 insertions(+), 19 deletions(-) diff --git a/src/main/java/storage/mapdb/Distributed.java b/src/main/java/storage/mapdb/Distributed.java index 67a1d8ed..14f67e97 100644 --- a/src/main/java/storage/mapdb/Distributed.java +++ b/src/main/java/storage/mapdb/Distributed.java @@ -97,13 +97,15 @@ public boolean remove(Entity e) { @Override public Entity get(Identifier entityId) { - Entity decodedEntity=null; + Entity decodedEntity = null; try { JsonEncoder encoder = new JsonEncoder(); lock.readLock().lock(); EncodedEntity encodedEntity = (EncodedEntity) distributedMap.get(entityId.getBytes()); - assert encodedEntity != null; + if (encodedEntity == null) { + return null; + } decodedEntity = encoder.decode(encodedEntity); } catch (ClassNotFoundException e) { //throw new ClassNotFoundException("could not found the class"+e); diff --git a/src/test/java/storage/DistributedTest.java b/src/test/java/storage/DistributedTest.java index b02efbaa..3cab982f 100644 --- a/src/test/java/storage/DistributedTest.java +++ b/src/test/java/storage/DistributedTest.java @@ -1,17 +1,5 @@ package storage; -import model.Entity; -import model.lightchain.Block; -import modules.codec.JsonEncoder; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.testcontainers.shaded.org.apache.commons.io.FileUtils; -import storage.mapdb.BlocksMapDb; -import storage.mapdb.Distributed; -import unittest.fixtures.BlockFixture; -import unittest.fixtures.TransactionFixture; - import java.io.File; import java.io.IOException; import java.nio.file.Files; @@ -22,6 +10,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import model.Entity; +import org.junit.jupiter.api.Assertions; +import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.testcontainers.shaded.org.apache.commons.io.FileUtils; +import storage.mapdb.Distributed; +import unittest.fixtures.BlockFixture; +import unittest.fixtures.TransactionFixture; + /** * Encapsulates tests for distributed storage. */ @@ -77,8 +74,6 @@ void setUp() throws IOException { allEntities = new ArrayList<>(); for (int i = 0; i < 10; i++) { allEntities.add(BlockFixture.newBlock(10)); - } - for (int i = 0; i < 10; i++) { allEntities.add(TransactionFixture.newTransaction(10)); } } @@ -89,7 +84,7 @@ void setUp() throws IOException { * @throws IOException throw IOException. */ @Test - void sequentialAddTest() throws IOException, ClassNotFoundException { + void sequentialAddTest() throws IOException { for (Entity entity : allEntities) { Assertions.assertTrue(db.add(entity)); } @@ -105,7 +100,11 @@ void sequentialAddTest() throws IOException, ClassNotFoundException { Assertions.assertTrue(all.contains(entity)); } db.closeDb(); - FileUtils.deleteDirectory(new File(tempdir.toString())); + try { + FileUtils.deleteDirectory(new File(tempdir.toString())); + } catch (IOException e) { + throw new IOException("could not delete directory"); + } } /** @@ -190,6 +189,7 @@ void concurrentAddTest() throws IOException { CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); Thread[] allThreads = new Thread[concurrencyDegree]; ArrayList all = db.all(); + Assertions.assertEquals(all.size(), 20); for (int i = 0; i < all.size(); i++) { int finalI = i; allThreads[i] = new Thread(() -> { @@ -210,8 +210,388 @@ void concurrentAddTest() throws IOException { Assertions.fail(); } db.closeDb(); - FileUtils.deleteDirectory(new File(tempdir.toString())); + try { + FileUtils.deleteDirectory(new File(tempdir.toString())); + } catch (IOException e) { + throw new IOException("could not delete directory"); + } + } + + /** + * Remove the first 10 entities and test methods. 
+ * + * @throws IOException for any unhappy path. + */ + @Test + void removeFirstTenTest() throws IOException { + for (Entity entity : allEntities) { + Assertions.assertTrue(db.add(entity)); + } + for (int i = 0; i < 10; i++) { + Assertions.assertTrue(db.remove(allEntities.get(i))); + } + for (int i = 0; i < 20; i++) { + if (i < 10) { + Assertions.assertFalse(db.has(allEntities.get(i).id()) || db.all().contains(allEntities.get(i))); + } else { + Assertions.assertTrue(db.has(allEntities.get(i).id()) && db.all().contains(allEntities.get(i))); + } + } + db.closeDb(); + try { + FileUtils.deleteDirectory(new File(tempdir.toString())); + } catch (IOException e) { + throw new IOException("could not delete directory"); + } } + /** + * Concurrent version of remove first ten test. + */ + @Test + void concurrentRemoveFirstTenTest() throws IOException { + int concurrencyDegree = 20; + + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(concurrencyDegree); + Thread[] addThreads = new Thread[concurrencyDegree]; + /* + Adding all transactions concurrently. 
+ */ + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + addThreads[i] = new Thread(() -> { + if (!db.add(allEntities.get(finalI))) { + threadError.getAndIncrement(); + } + addDone.countDown(); + }); + } + for (Thread t : addThreads) { + t.start(); + } + try { + boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Removing first 10 concurrently + */ + int removeTill = concurrencyDegree / 2; + CountDownLatch doneRemove = new CountDownLatch(removeTill); + Thread[] removeThreads = new Thread[removeTill]; + for (int i = 0; i < removeTill; i++) { + int finalI = i; + removeThreads[i] = new Thread(() -> { + if (!db.remove(allEntities.get(finalI))) { + threadError.getAndIncrement(); + } + doneRemove.countDown(); + }); + } + + for (Thread t : removeThreads) { + t.start(); + } + try { + boolean doneOneTime = doneRemove.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Check Has method after removing. + */ + CountDownLatch doneHas = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + int finalI1 = i; + hasThreads[i] = new Thread(() -> { + if (allEntities.indexOf(allEntities.get(finalI)) < 10) { + if (db.has(allEntities.get(finalI1).id())) { + threadError.getAndIncrement(); + } + } else { + if (!db.has(allEntities.get(finalI).id())) { + threadError.getAndIncrement(); + } + } + doneHas.countDown(); + }); + } + + for (Thread t : hasThreads) { + t.start(); + } + try { + boolean doneOneTime = doneHas.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Check get method after removing. 
+ */ + CountDownLatch getDone = new CountDownLatch(concurrencyDegree / 2); + Thread[] getThreads = new Thread[concurrencyDegree / 2]; + for (int i = 0; i < concurrencyDegree / 2; i++) { + int finalI = i; + int finalI1 = i + 10; + getThreads[i] = new Thread(() -> { + if (allEntities.contains(db.get(allEntities.get(finalI).id())) + || !allEntities.contains(db.get(allEntities.get(finalI1).id()))) { + threadError.getAndIncrement(); + } + getDone.countDown(); + }); + } + + for (Thread t : getThreads) { + t.start(); + } + try { + boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + Assertions.assertEquals(0, threadError.get()); + db.closeDb(); + try { + FileUtils.deleteDirectory(new File(tempdir.toString())); + } catch (IOException e) { + throw new IOException("could not delete directory"); + } + } + + /** + * Add 20 entities already exist and return false expected. + * + * @throws IOException for any unhappy path. + */ + @Test + void duplicationTest() throws IOException { + for (Entity entity : allEntities) { + Assertions.assertTrue(db.add(entity)); + } + for (Entity entity : allEntities) { + Assertions.assertTrue(db.has(entity.id())); + } + ArrayList all = db.all(); + Assertions.assertEquals(all.size(), 20); + for (Entity entity : all) { + Assertions.assertTrue(allEntities.contains(entity)); + } + for (Entity entity : allEntities) { + Assertions.assertTrue(allEntities.contains(db.get(entity.id()))); + } + for (Entity entity : allEntities) { + Assertions.assertFalse(db.add(entity)); + } + /* + After trying duplication, check again. 
+ */ + for (Entity entity : allEntities) { + Assertions.assertTrue(db.has(entity.id())); + } + for (Entity entity : allEntities) { + Assertions.assertTrue(allEntities.contains(db.get(entity.id()))); + } + db.closeDb(); + try { + FileUtils.deleteDirectory(new File(tempdir.toString())); + } catch (IOException e) { + throw new IOException("could not delete directory"); + } + } + + + @Test + void concurrentDuplicationTest() throws IOException { + int concurrencyDegree = 20; + + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(concurrencyDegree); + Thread[] addThreads = new Thread[concurrencyDegree]; + /* + Adding all transactions concurrently. + */ + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + addThreads[i] = new Thread(() -> { + if (!db.add(allEntities.get(finalI))) { + threadError.getAndIncrement(); + } + addDone.countDown(); + }); + } + for (Thread t : addThreads) { + t.start(); + } + try { + boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has. + */ + CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + hasThreads[i] = new Thread(() -> { + if (!db.has((allEntities.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone.countDown(); + }); + } + + for (Thread t : hasThreads) { + t.start(); + } + try { + boolean doneOneTime = hasDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by GET. 
+ */ + CountDownLatch getDone = new CountDownLatch(concurrencyDegree); + Thread[] getThreads = new Thread[concurrencyDegree]; + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + getThreads[i] = new Thread(() -> { + if (!allEntities.contains(db.get(allEntities.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone.countDown(); + }); + } + + for (Thread t : getThreads) { + t.start(); + } + try { + boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Retrieving all concurrently. + */ + CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); + Thread[] allThreads = new Thread[concurrencyDegree]; + ArrayList all = db.all(); + Assertions.assertEquals(all.size(), 20); + for (int i = 0; i < all.size(); i++) { + int finalI = i; + allThreads[i] = new Thread(() -> { + if (!all.contains(allEntities.get(finalI))) { + threadError.getAndIncrement(); + } + doneAll.countDown(); + }); + } + + for (Thread t : allThreads) { + t.start(); + } + try { + boolean doneOneTime = doneAll.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Adding existing entities. + */ + CountDownLatch addDuplicateDone = new CountDownLatch(concurrencyDegree); + Thread[] addDuplicateThreads = new Thread[concurrencyDegree]; + /* + Adding all transactions concurrently. 
+ */ + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + addDuplicateThreads[i] = new Thread(() -> { + if (db.add(allEntities.get(finalI))) { + threadError.getAndIncrement(); + } + addDuplicateDone.countDown(); + }); + } + for (Thread t : addDuplicateThreads) { + t.start(); + } + try { + boolean doneOneTime = addDuplicateDone.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Has again. + */ + CountDownLatch hasDone2 = new CountDownLatch(concurrencyDegree); + Thread[] hasThreads2 = new Thread[concurrencyDegree]; + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + hasThreads2[i] = new Thread(() -> { + if (!db.has((allEntities.get(finalI)).id())) { + threadError.getAndIncrement(); + } + hasDone2.countDown(); + }); + } + + for (Thread t : hasThreads2) { + t.start(); + } + try { + boolean doneOneTime = hasDone2.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + /* + Checking correctness of insertion by Get again. 
+ */ + CountDownLatch getDone2 = new CountDownLatch(concurrencyDegree); + Thread[] getThreads2 = new Thread[concurrencyDegree]; + for (int i = 0; i < allEntities.size(); i++) { + int finalI = i; + getThreads2[i] = new Thread(() -> { + if (!allEntities.contains(db.get(allEntities.get(finalI).id()))) { + threadError.getAndIncrement(); + } + getDone2.countDown(); + }); + } + + for (Thread t : getThreads2) { + t.start(); + } + try { + boolean doneOneTime = getDone2.await(60, TimeUnit.SECONDS); + Assertions.assertTrue(doneOneTime); + } catch (InterruptedException e) { + Assertions.fail(); + } + Assertions.assertEquals(0, threadError.get()); + db.closeDb(); + try { + FileUtils.deleteDirectory(new File(tempdir.toString())); + } catch (IOException e) { + throw new IOException("could not delete directory"); + } + } } From 9cae3f3772f0b478abfe99cdd396347dd8ab8ce6 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 28 Apr 2022 18:28:14 +0300 Subject: [PATCH 22/31] Style modification --- src/main/java/model/codec/EncodedEntity.java | 12 ++++++++---- src/main/java/model/crypto/Signature.java | 3 +-- src/main/java/modules/codec/JsonEncoder.java | 3 --- src/main/java/storage/mapdb/Distributed.java | 5 ++++- src/test/java/storage/DistributedTest.java | 6 +++++- 5 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/main/java/model/codec/EncodedEntity.java b/src/main/java/model/codec/EncodedEntity.java index 745cccce..8dbfca16 100644 --- a/src/main/java/model/codec/EncodedEntity.java +++ b/src/main/java/model/codec/EncodedEntity.java @@ -10,13 +10,16 @@ public class EncodedEntity implements Serializable { private final byte[] bytes; private final String type; - - - // EncodedEntity(id.getBytes() || byte(i), "assignment") + /** + * Creates encoded entity. + * @param bytes bytes of entity. + * @param type types of entity. + */ public EncodedEntity(byte[] bytes, String type) { this.bytes = bytes.clone(); this.type = type; } + /** * Hashcode of entity. 
* @@ -28,7 +31,7 @@ public int hashCode() { } /** - * Check if objects are equal + * Check if objects are equal. * * @param o encodedentity. * @return true if equals. @@ -44,6 +47,7 @@ public boolean equals(Object o) { EncodedEntity that = (EncodedEntity) o; return Arrays.equals(this.bytes, that.bytes); } + public byte[] getBytes() { return bytes.clone(); } diff --git a/src/main/java/model/crypto/Signature.java b/src/main/java/model/crypto/Signature.java index fdbc3671..7d0cbc3c 100644 --- a/src/main/java/model/crypto/Signature.java +++ b/src/main/java/model/crypto/Signature.java @@ -8,7 +8,7 @@ /** * Represents abstract data type for the cryptographic digital signature used in LightChain. */ -public abstract class Signature extends Entity implements Serializable{ +public abstract class Signature extends Entity implements Serializable { /** * The signature value in bytes. */ @@ -31,5 +31,4 @@ public byte[] getBytes() { return bytes.clone(); } - } diff --git a/src/main/java/modules/codec/JsonEncoder.java b/src/main/java/modules/codec/JsonEncoder.java index 392a45a0..50edc9c4 100644 --- a/src/main/java/modules/codec/JsonEncoder.java +++ b/src/main/java/modules/codec/JsonEncoder.java @@ -1,10 +1,7 @@ package modules.codec; import java.io.*; -import java.lang.reflect.Type; -import java.nio.charset.StandardCharsets; -import com.google.gson.Gson; import model.Entity; import model.codec.EncodedEntity; diff --git a/src/main/java/storage/mapdb/Distributed.java b/src/main/java/storage/mapdb/Distributed.java index 14f67e97..63cefa15 100644 --- a/src/main/java/storage/mapdb/Distributed.java +++ b/src/main/java/storage/mapdb/Distributed.java @@ -12,6 +12,9 @@ import org.mapdb.HTreeMap; import org.mapdb.Serializer; +/** + * Distributed databese that store encoded entities. 
+ */ public class Distributed implements storage.Distributed { private final DB db; @@ -20,7 +23,7 @@ public class Distributed implements storage.Distributed { private final HTreeMap distributedMap; /** - * Creates DistributedMapDb + * Creates DistributedMapDb. */ public Distributed(String filePath) { this.db = DBMaker.fileDB(filePath).make(); diff --git a/src/test/java/storage/DistributedTest.java b/src/test/java/storage/DistributedTest.java index 3cab982f..8bb63093 100644 --- a/src/test/java/storage/DistributedTest.java +++ b/src/test/java/storage/DistributedTest.java @@ -408,7 +408,11 @@ void duplicationTest() throws IOException { } } - + /** + * Concurrent version of duplication test. + * + * @throws IOException for any unhappy path for dir deletion. + */ @Test void concurrentDuplicationTest() throws IOException { int concurrencyDegree = 20; From 7ee60b3c87ea6ae9047fc45da3c9ee0c57cc2764 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 28 Apr 2022 18:30:14 +0300 Subject: [PATCH 23/31] Style modification --- .../mapdb/{Distributed.java => DistributedMapDb.java} | 4 ++-- .../{DistributedTest.java => DistributedMapDbTest.java} | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) rename src/main/java/storage/mapdb/{Distributed.java => DistributedMapDb.java} (97%) rename src/test/java/storage/{DistributedTest.java => DistributedMapDbTest.java} (99%) diff --git a/src/main/java/storage/mapdb/Distributed.java b/src/main/java/storage/mapdb/DistributedMapDb.java similarity index 97% rename from src/main/java/storage/mapdb/Distributed.java rename to src/main/java/storage/mapdb/DistributedMapDb.java index 63cefa15..511fb596 100644 --- a/src/main/java/storage/mapdb/Distributed.java +++ b/src/main/java/storage/mapdb/DistributedMapDb.java @@ -15,7 +15,7 @@ /** * Distributed databese that store encoded entities. 
*/ -public class Distributed implements storage.Distributed { +public class DistributedMapDb implements storage.Distributed { private final DB db; private final ReentrantReadWriteLock lock; @@ -25,7 +25,7 @@ public class Distributed implements storage.Distributed { /** * Creates DistributedMapDb. */ - public Distributed(String filePath) { + public DistributedMapDb(String filePath) { this.db = DBMaker.fileDB(filePath).make(); this.lock = new ReentrantReadWriteLock(); distributedMap = this.db.hashMap(MAP_NAME) diff --git a/src/test/java/storage/DistributedTest.java b/src/test/java/storage/DistributedMapDbTest.java similarity index 99% rename from src/test/java/storage/DistributedTest.java rename to src/test/java/storage/DistributedMapDbTest.java index 8bb63093..900c86ea 100644 --- a/src/test/java/storage/DistributedTest.java +++ b/src/test/java/storage/DistributedMapDbTest.java @@ -15,20 +15,20 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.testcontainers.shaded.org.apache.commons.io.FileUtils; -import storage.mapdb.Distributed; +import storage.mapdb.DistributedMapDb; import unittest.fixtures.BlockFixture; import unittest.fixtures.TransactionFixture; /** * Encapsulates tests for distributed storage. */ -public class DistributedTest { +public class DistributedMapDbTest { private static final String TEMP_DIR = "tempdir"; private static final String TEMP_FILE_ID = "tempfileID.db"; private Path tempdir; private ArrayList allEntities; - private Distributed db; + private DistributedMapDb db; // TODO: implement a unit test for each of the following scenarios: // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a // temporary directory. 
@@ -70,7 +70,7 @@ public class DistributedTest { void setUp() throws IOException { Path currentRelativePath = Paths.get(""); tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); - db = new Distributed(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID); + db = new DistributedMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID); allEntities = new ArrayList<>(); for (int i = 0; i < 10; i++) { allEntities.add(BlockFixture.newBlock(10)); From ca57dbed91a31e2f37e0f834aa4a05f427fb7519 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 28 Apr 2022 18:30:14 +0300 Subject: [PATCH 24/31] Style modification --- .../mapdb/{Distributed.java => DistributedMapDb.java} | 4 ++-- .../{DistributedTest.java => DistributedMapDbTest.java} | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) rename src/main/java/storage/mapdb/{Distributed.java => DistributedMapDb.java} (97%) rename src/test/java/storage/{DistributedTest.java => DistributedMapDbTest.java} (99%) diff --git a/src/main/java/storage/mapdb/Distributed.java b/src/main/java/storage/mapdb/DistributedMapDb.java similarity index 97% rename from src/main/java/storage/mapdb/Distributed.java rename to src/main/java/storage/mapdb/DistributedMapDb.java index 63cefa15..511fb596 100644 --- a/src/main/java/storage/mapdb/Distributed.java +++ b/src/main/java/storage/mapdb/DistributedMapDb.java @@ -15,7 +15,7 @@ /** * Distributed databese that store encoded entities. */ -public class Distributed implements storage.Distributed { +public class DistributedMapDb implements storage.Distributed { private final DB db; private final ReentrantReadWriteLock lock; @@ -25,7 +25,7 @@ public class Distributed implements storage.Distributed { /** * Creates DistributedMapDb. 
*/ - public Distributed(String filePath) { + public DistributedMapDb(String filePath) { this.db = DBMaker.fileDB(filePath).make(); this.lock = new ReentrantReadWriteLock(); distributedMap = this.db.hashMap(MAP_NAME) diff --git a/src/test/java/storage/DistributedTest.java b/src/test/java/storage/DistributedMapDbTest.java similarity index 99% rename from src/test/java/storage/DistributedTest.java rename to src/test/java/storage/DistributedMapDbTest.java index 8bb63093..900c86ea 100644 --- a/src/test/java/storage/DistributedTest.java +++ b/src/test/java/storage/DistributedMapDbTest.java @@ -15,20 +15,20 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.testcontainers.shaded.org.apache.commons.io.FileUtils; -import storage.mapdb.Distributed; +import storage.mapdb.DistributedMapDb; import unittest.fixtures.BlockFixture; import unittest.fixtures.TransactionFixture; /** * Encapsulates tests for distributed storage. */ -public class DistributedTest { +public class DistributedMapDbTest { private static final String TEMP_DIR = "tempdir"; private static final String TEMP_FILE_ID = "tempfileID.db"; private Path tempdir; private ArrayList allEntities; - private Distributed db; + private DistributedMapDb db; // TODO: implement a unit test for each of the following scenarios: // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a // temporary directory. 
@@ -70,7 +70,7 @@ public class DistributedTest { void setUp() throws IOException { Path currentRelativePath = Paths.get(""); tempdir = Files.createTempDirectory(currentRelativePath, TEMP_DIR); - db = new Distributed(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID); + db = new DistributedMapDb(tempdir.toAbsolutePath() + "/" + TEMP_FILE_ID); allEntities = new ArrayList<>(); for (int i = 0; i < 10; i++) { allEntities.add(BlockFixture.newBlock(10)); From cc2d6aafab9eac412ddac204e1e0198170dd9ecc Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 28 Apr 2022 18:35:48 +0300 Subject: [PATCH 25/31] Style modification --- src/main/java/model/codec/EncodedEntity.java | 1 + src/main/java/model/crypto/Hash.java | 4 ++-- src/main/java/storage/Distributed.java | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/main/java/model/codec/EncodedEntity.java b/src/main/java/model/codec/EncodedEntity.java index 8dbfca16..8942af6a 100644 --- a/src/main/java/model/codec/EncodedEntity.java +++ b/src/main/java/model/codec/EncodedEntity.java @@ -12,6 +12,7 @@ public class EncodedEntity implements Serializable { /** * Creates encoded entity. + * * @param bytes bytes of entity. * @param type types of entity. */ diff --git a/src/main/java/model/crypto/Hash.java b/src/main/java/model/crypto/Hash.java index d4e402e0..d5c92531 100644 --- a/src/main/java/model/crypto/Hash.java +++ b/src/main/java/model/crypto/Hash.java @@ -1,9 +1,9 @@ package model.crypto; -import model.lightchain.Identifier; - import java.io.Serializable; +import model.lightchain.Identifier; + /** * Represents abstract data type for the cryptographic hash function used in LightChain. 
*/ diff --git a/src/main/java/storage/Distributed.java b/src/main/java/storage/Distributed.java index c1143e6e..1fcd09ce 100644 --- a/src/main/java/storage/Distributed.java +++ b/src/main/java/storage/Distributed.java @@ -3,7 +3,7 @@ import java.util.ArrayList; import model.Entity; -import model.lightchain.Block; + import model.lightchain.Identifier; /** From ba335fd636d3d837898ad01a6d3d38c2f6114c19 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 28 Apr 2022 18:37:33 +0300 Subject: [PATCH 26/31] Style modification --- src/main/java/storage/Distributed.java | 1 - 1 file changed, 1 deletion(-) diff --git a/src/main/java/storage/Distributed.java b/src/main/java/storage/Distributed.java index 1fcd09ce..b4440251 100644 --- a/src/main/java/storage/Distributed.java +++ b/src/main/java/storage/Distributed.java @@ -3,7 +3,6 @@ import java.util.ArrayList; import model.Entity; - import model.lightchain.Identifier; /** From 99d5c9931c93baa39d0dba317eacd953917d57ee Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Thu, 28 Apr 2022 18:48:15 +0300 Subject: [PATCH 27/31] concurrently addition tests is fixed --- src/test/java/storage/DistributedMapDbTest.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/java/storage/DistributedMapDbTest.java b/src/test/java/storage/DistributedMapDbTest.java index 900c86ea..b9a533cd 100644 --- a/src/test/java/storage/DistributedMapDbTest.java +++ b/src/test/java/storage/DistributedMapDbTest.java @@ -209,6 +209,7 @@ void concurrentAddTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } + Assertions.assertEquals(0, threadError.get()); db.closeDb(); try { FileUtils.deleteDirectory(new File(tempdir.toString())); From 7d20b7b13379d2958f73569415ae91fae0bb2cf9 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Sun, 22 May 2022 11:11:33 +0300 Subject: [PATCH 28/31] apply revision for tests --- .../java/storage/DistributedMapDbTest.java | 567 +++++++----------- 1 file changed, 219 insertions(+), 348 
deletions(-) diff --git a/src/test/java/storage/DistributedMapDbTest.java b/src/test/java/storage/DistributedMapDbTest.java index b9a533cd..185e0e3c 100644 --- a/src/test/java/storage/DistributedMapDbTest.java +++ b/src/test/java/storage/DistributedMapDbTest.java @@ -11,6 +11,7 @@ import java.util.concurrent.atomic.AtomicInteger; import model.Entity; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -29,42 +30,9 @@ public class DistributedMapDbTest { private Path tempdir; private ArrayList allEntities; private DistributedMapDb db; - // TODO: implement a unit test for each of the following scenarios: - // IMPORTANT NOTE: each test must have a separate instance of database, and the database MUST only created on a - // temporary directory. - // In following tests by a "new" entity, we mean an entity that already does not exist in the database, - // and by a "duplicate" entity, we mean one that already exists in the database. - // 1. When adding 20 new entities of different types (10 transactions and 10 blocks) sequentially, - // the Add method must return true for all of them. Moreover, after - // adding entities are done, querying the Has method for each of the entities should return true. - // After adding all entities - // are done, each entity must be retrievable using both its id (get). Also, when - // querying All method, list of all 20 entities must be returned. - // 2. Repeat test case 1 for concurrently adding entities as well as concurrently querying the database for has, and - // get. - // 3. Add 20 new entities sequentially (10 transactions and 10 blocks), check that they are added correctly, i.e., - // while adding each entity Add must return - // true, Has returns true for each of them, each entity is retrievable by its identifier, - // and All returns list of all of them. 
- // Then Remove the first 10 entities (5 blocks and 5 transactions) sequentially. - // While Removing each of them, the Remove should return true. Then query all 20 entities using has, and get. - // Has should return false for the first 5 blocks amd 5 transactions that have been removed, - // and get should return null. But for the last 5 blocks and 5 transactions, has should return true, and get - // should successfully retrieve the exact entity. - // Also, All should return only the last 5 blocks and 5 transactions. - // 4. Repeat test case 3 for concurrently adding and removing entities as well as concurrently querying the - // database for has, and get. - // 5. Add 20 new entities (10 blocks, and 10 transactions) - // and check that all of them are added correctly, i.e., while adding each entity - // Add must return true, has returns true for each of them, and All returns list of all of them. Moreover, each - // entity is retrievable using its identifier (get). Then try Adding all of them again, and - // Add should return false for each of them, while has should still return true, and get should be - // able to retrieve the entity. - // 6. Repeat test case 5 for concurrently adding entities as well as concurrently querying the - // database for has, get. /** - * Set the tests up. + * Initialize database. */ @BeforeEach void setUp() throws IOException { @@ -79,7 +47,24 @@ void setUp() throws IOException { } /** - * Adding entities sequentially. + * Closes database. + * + * @throws IOException if deleting temporary directory faces unhappy path. + */ + @AfterEach + void cleanup() throws IOException { + db.closeDb(); + FileUtils.deleteDirectory(new File(tempdir.toString())); + } + + + /** + * When adding 20 new entities of different types (10 transactions and 10 blocks) sequentially, + * the Add method must return true for all of them. Moreover, after + * adding entities are done, querying the Has method for each of the entities should return true. 
+ * After adding all entities + * are done, each entity must be retrievable using both its id (get). Also, when + * querying All method, list of all 20 entities must be returned. * * @throws IOException throw IOException. */ @@ -108,118 +93,41 @@ void sequentialAddTest() throws IOException { } /** - * Concurrent version of adding entities. + * When adding 20 new entities of different types (10 transactions and 10 blocks) CONCURRENTLY, + * the Add method must return true for all of them. Moreover, after + * adding entities are done, querying the Has method for each of the entities should return true. + * After adding all entities + * are done, each entity must be retrievable using both its id (get). Also, when + * querying All method, list of all 20 entities must be returned. + * */ @Test - void concurrentAddTest() throws IOException { - int concurrencyDegree = 20; - - AtomicInteger threadError = new AtomicInteger(); - CountDownLatch addDone = new CountDownLatch(concurrencyDegree); - Thread[] addThreads = new Thread[concurrencyDegree]; - /* - Adding all transactions concurrently. - */ - for (int i = 0; i < allEntities.size(); i++) { - int finalI = i; - addThreads[i] = new Thread(() -> { - if (!db.add(allEntities.get(finalI))) { - threadError.getAndIncrement(); - } - addDone.countDown(); - }); - } - for (Thread t : addThreads) { - t.start(); - } - try { - boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - Assertions.fail(); - } + void concurrentAddTest() { /* - Checking correctness of insertion by Has. + Adding all blocks concurrently. 
*/ - CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); - Thread[] hasThreads = new Thread[concurrencyDegree]; - for (int i = 0; i < allEntities.size(); i++) { - int finalI = i; - hasThreads[i] = new Thread(() -> { - if (!db.has((allEntities.get(finalI)).id())) { - threadError.getAndIncrement(); - } - hasDone.countDown(); - }); - } + this.addAllEntitiesConcurrently(true); - for (Thread t : hasThreads) { - t.start(); - } - try { - boolean doneOneTime = hasDone.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - Assertions.fail(); - } - /* - Checking correctness of insertion by GET. + /* + All blocks should be retrievable */ - CountDownLatch getDone = new CountDownLatch(concurrencyDegree); - Thread[] getThreads = new Thread[concurrencyDegree]; - for (int i = 0; i < allEntities.size(); i++) { - int finalI = i; - getThreads[i] = new Thread(() -> { - if (!allEntities.contains(db.get(allEntities.get(finalI).id()))) { - threadError.getAndIncrement(); - } - getDone.countDown(); - }); - } + this.checkForHasConcurrently(0); + this.checkForGetConcurrently(0); + this.checkForAllConcurrently(0); - for (Thread t : getThreads) { - t.start(); - } - try { - boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - Assertions.fail(); - } - CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); - Thread[] allThreads = new Thread[concurrencyDegree]; - ArrayList all = db.all(); - Assertions.assertEquals(all.size(), 20); - for (int i = 0; i < all.size(); i++) { - int finalI = i; - allThreads[i] = new Thread(() -> { - if (!all.contains(allEntities.get(finalI))) { - threadError.getAndIncrement(); - } - doneAll.countDown(); - }); - } - - for (Thread t : allThreads) { - t.start(); - } - try { - boolean doneOneTime = doneAll.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - 
Assertions.fail(); - } - Assertions.assertEquals(0, threadError.get()); - db.closeDb(); - try { - FileUtils.deleteDirectory(new File(tempdir.toString())); - } catch (IOException e) { - throw new IOException("could not delete directory"); - } } /** - * Remove the first 10 entities and test methods. + * Add 20 new entities sequentially (10 transactions and 10 blocks), check that they are added correctly, i.e., + * while adding each entity Add must return + * true, Has returns true for each of them, each entity is retrievable by its identifier, + * and All returns list of all of them. + * Then Remove the first 10 entities (5 blocks and 5 transactions) sequentially. + * While Removing each of them, the Remove should return true. Then query all 20 entities using has, and get. + * Has should return false for the first 5 blocks amd 5 transactions that have been removed, + * and get should return null. But for the last 5 blocks and 5 transactions, has should return true, and get + * should successfully retrieve the exact entity. + * Also, All should return only the last 5 blocks and 5 transactions. * * @throws IOException for any unhappy path. */ @@ -247,43 +155,57 @@ void removeFirstTenTest() throws IOException { } /** - * Concurrent version of remove first ten test. + * Add 20 new entities CONCURRENTLY (10 transactions and 10 blocks), check that they are added correctly, i.e., + * while adding each entity Add must return + * true, Has returns true for each of them, each entity is retrievable by its identifier, + * and All returns list of all of them. + * Then Remove the first 10 entities (5 blocks and 5 transactions) sequentially. + * While Removing each of them, the Remove should return true. Then query all 20 entities using has, and get. + * Has should return false for the first 5 blocks amd 5 transactions that have been removed, + * and get should return null. 
But for the last 5 blocks and 5 transactions, has should return true, and get + * should successfully retrieve the exact entity. + * Also, All should return only the last 5 blocks and 5 transactions. + * */ @Test - void concurrentRemoveFirstTenTest() throws IOException { - int concurrencyDegree = 20; - - AtomicInteger threadError = new AtomicInteger(); - CountDownLatch addDone = new CountDownLatch(concurrencyDegree); - Thread[] addThreads = new Thread[concurrencyDegree]; - /* - Adding all transactions concurrently. + void concurrentRemoveFirstTenTest() { + /* + Adding all entities concurrently. */ - for (int i = 0; i < allEntities.size(); i++) { - int finalI = i; - addThreads[i] = new Thread(() -> { - if (!db.add(allEntities.get(finalI))) { - threadError.getAndIncrement(); - } - addDone.countDown(); - }); - } - for (Thread t : addThreads) { - t.start(); - } - try { - boolean doneOneTime = addDone.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - Assertions.fail(); - } - /* - Removing first 10 concurrently + this.addAllEntitiesConcurrently(true); + + /* + All entities should be retrievable. */ - int removeTill = concurrencyDegree / 2; - CountDownLatch doneRemove = new CountDownLatch(removeTill); - Thread[] removeThreads = new Thread[removeTill]; - for (int i = 0; i < removeTill; i++) { + this.checkForGetConcurrently(0); + this.checkForHasConcurrently(0); + this.checkForAllConcurrently(0); + + /* + Removing first 10 concurrently + */ + this.removeEntityTill(10); + + /* + first five blocks must not be retrievable, + the rest must be. + */ + this.checkForGetConcurrently(10); + this.checkForHasConcurrently(10); + this.checkForAllConcurrently(10); + } + + + /** + * Removes entities from blocks storage database till the given index concurrently. + * + * @param till exclusive index of the last entity being removed. 
+ */ + private void removeEntityTill(int till) { + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch doneRemove = new CountDownLatch(till); + Thread[] removeThreads = new Thread[till]; + for (int i = 0; i < till; i++) { int finalI = i; removeThreads[i] = new Thread(() -> { if (!db.remove(allEntities.get(finalI))) { @@ -302,74 +224,17 @@ void concurrentRemoveFirstTenTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* - Check Has method after removing. - */ - CountDownLatch doneHas = new CountDownLatch(concurrencyDegree); - Thread[] hasThreads = new Thread[concurrencyDegree]; - for (int i = 0; i < allEntities.size(); i++) { - int finalI = i; - int finalI1 = i; - hasThreads[i] = new Thread(() -> { - if (allEntities.indexOf(allEntities.get(finalI)) < 10) { - if (db.has(allEntities.get(finalI1).id())) { - threadError.getAndIncrement(); - } - } else { - if (!db.has(allEntities.get(finalI).id())) { - threadError.getAndIncrement(); - } - } - doneHas.countDown(); - }); - } - - for (Thread t : hasThreads) { - t.start(); - } - try { - boolean doneOneTime = doneHas.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - Assertions.fail(); - } - /* - Check get method after removing. 
- */ - CountDownLatch getDone = new CountDownLatch(concurrencyDegree / 2); - Thread[] getThreads = new Thread[concurrencyDegree / 2]; - for (int i = 0; i < concurrencyDegree / 2; i++) { - int finalI = i; - int finalI1 = i + 10; - getThreads[i] = new Thread(() -> { - if (allEntities.contains(db.get(allEntities.get(finalI).id())) - || !allEntities.contains(db.get(allEntities.get(finalI1).id()))) { - threadError.getAndIncrement(); - } - getDone.countDown(); - }); - } - - for (Thread t : getThreads) { - t.start(); - } - try { - boolean doneOneTime = getDone.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - Assertions.fail(); - } Assertions.assertEquals(0, threadError.get()); - db.closeDb(); - try { - FileUtils.deleteDirectory(new File(tempdir.toString())); - } catch (IOException e) { - throw new IOException("could not delete directory"); - } + } /** - * Add 20 entities already exist and return false expected. + * Add 20 new entities sequentially (10 blocks, and 10 transactions) + * and check that all of them are added correctly, i.e., while adding each entity + * Add must return true, has returns true for each of them, and All returns list of all of them. Moreover, each + * entity is retrievable using its identifier (get). Then try Adding all of them again, and + * Add should return false for each of them, while has should still return true, and get should be + * able to retrieve the entity. * * @throws IOException for any unhappy path. */ @@ -410,24 +275,55 @@ void duplicationTest() throws IOException { } /** - * Concurrent version of duplication test. + * Add 20 new entities concurrently (10 blocks, and 10 transactions) + * and check that all of them are added correctly, i.e., while adding each entity + * Add must return true, has returns true for each of them, and All returns list of all of them. Moreover, each + * entity is retrievable using its identifier (get). 
Then try Adding all of them again, and + * Add should return false for each of them, while has should still return true, and get should be + * able to retrieve the entity. * - * @throws IOException for any unhappy path for dir deletion. */ @Test - void concurrentDuplicationTest() throws IOException { - int concurrencyDegree = 20; - - AtomicInteger threadError = new AtomicInteger(); - CountDownLatch addDone = new CountDownLatch(concurrencyDegree); - Thread[] addThreads = new Thread[concurrencyDegree]; + void concurrentDuplicationTest() { /* - Adding all transactions concurrently. + Adding all entities concurrently. + */ + this.addAllEntitiesConcurrently(true); + /* + All entities should be retrievable using their id or height. + */ + this.checkForGetConcurrently(0); + this.checkForHasConcurrently(0); + this.checkForAllConcurrently(0); + /* + Adding all entities again concurrently, all should fail due to duplication. + */ + this.addAllEntitiesConcurrently(false); + /* + Again, all entities should be retrievable using their id or height. */ + this.checkForGetConcurrently(0); + this.checkForHasConcurrently(0); + this.checkForAllConcurrently(0); + } + + /** + * Adds all entities to the distributed storage database till the given index concurrently. + * + * @param expectedResult expected boolean result after each insertion; true means entity added successfully, + * false means entity was not added successfully. + */ + private void addAllEntitiesConcurrently(boolean expectedResult) { + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch addDone = new CountDownLatch(allEntities.size()); + Thread[] addThreads = new Thread[allEntities.size()]; + /* + Adding all blocks concurrently. 
+ */ for (int i = 0; i < allEntities.size(); i++) { int finalI = i; addThreads[i] = new Thread(() -> { - if (!db.add(allEntities.get(finalI))) { + if (db.add(allEntities.get(finalI)) != expectedResult) { threadError.getAndIncrement(); } addDone.countDown(); @@ -442,17 +338,36 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* - Checking correctness of insertion by Has. - */ - CountDownLatch hasDone = new CountDownLatch(concurrencyDegree); - Thread[] hasThreads = new Thread[concurrencyDegree]; + + Assertions.assertEquals(0, threadError.get()); + } + + /** + * Checks existence of entities in the entity storage database starting from the given index. + * + * @param from inclusive index of the first entity to check. + */ + private void checkForHasConcurrently(int from) { + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch hasDone = new CountDownLatch(allEntities.size()); + Thread[] hasThreads = new Thread[allEntities.size()]; for (int i = 0; i < allEntities.size(); i++) { int finalI = i; + Entity entity = allEntities.get(i); + hasThreads[i] = new Thread(() -> { - if (!db.has((allEntities.get(finalI)).id())) { - threadError.getAndIncrement(); + if (finalI < from) { + // blocks should not exist + if (this.db.has(entity.id())) { + threadError.incrementAndGet(); + } + } else { + // block should exist + if (!this.db.has(entity.id())) { + threadError.getAndIncrement(); + } } + hasDone.countDown(); }); } @@ -466,17 +381,39 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* - Checking correctness of insertion by GET. - */ - CountDownLatch getDone = new CountDownLatch(concurrencyDegree); - Thread[] getThreads = new Thread[concurrencyDegree]; + + Assertions.assertEquals(0, threadError.get()); + } + + /** + * Checks retrievability of entity from the distributed storage database starting from the given index. 
+ * + * @param from inclusive index of the first entity to check. + */ + private void checkForGetConcurrently(int from) { + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch getDone = new CountDownLatch(allEntities.size()); + Thread[] getThreads = new Thread[allEntities.size()]; for (int i = 0; i < allEntities.size(); i++) { int finalI = i; + Entity entity = allEntities.get(i); getThreads[i] = new Thread(() -> { - if (!allEntities.contains(db.get(allEntities.get(finalI).id()))) { - threadError.getAndIncrement(); + Entity got = db.get(entity.id()); + if (finalI < from) { + // blocks should not exist + if (got != null) { + threadError.incrementAndGet(); + } + } else { + // block should exist + if (!entity.equals(got)) { + threadError.getAndIncrement(); + } + if (!entity.id().equals(got.id())) { + threadError.getAndIncrement(); + } } + getDone.countDown(); }); } @@ -490,18 +427,33 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* - Retrieving all concurrently. - */ - CountDownLatch doneAll = new CountDownLatch(concurrencyDegree); - Thread[] allThreads = new Thread[concurrencyDegree]; + Assertions.assertEquals(0, threadError.get()); + } + + /** + * Checks retrievability of entities from the distributed storage database starting from the given index. + * + * @param from inclusive index of the first transaction to check. 
+ */ + private void checkForAllConcurrently(int from) { + AtomicInteger threadError = new AtomicInteger(); + CountDownLatch doneAll = new CountDownLatch(allEntities.size()); + Thread[] allThreads = new Thread[allEntities.size()]; ArrayList all = db.all(); - Assertions.assertEquals(all.size(), 20); - for (int i = 0; i < all.size(); i++) { + for (int i = 0; i < allEntities.size(); i++) { int finalI = i; + final Entity entity = allEntities.get(i); allThreads[i] = new Thread(() -> { - if (!all.contains(allEntities.get(finalI))) { - threadError.getAndIncrement(); + if (finalI < from) { + // blocks should not exist + if (all.contains(entity)) { + threadError.incrementAndGet(); + } + } else { + // block should exist + if (!all.contains(entity)) { + threadError.getAndIncrement(); + } } doneAll.countDown(); }); @@ -516,87 +468,6 @@ void concurrentDuplicationTest() throws IOException { } catch (InterruptedException e) { Assertions.fail(); } - /* - Adding existing entities. - */ - CountDownLatch addDuplicateDone = new CountDownLatch(concurrencyDegree); - Thread[] addDuplicateThreads = new Thread[concurrencyDegree]; - /* - Adding all transactions concurrently. - */ - for (int i = 0; i < allEntities.size(); i++) { - int finalI = i; - addDuplicateThreads[i] = new Thread(() -> { - if (db.add(allEntities.get(finalI))) { - threadError.getAndIncrement(); - } - addDuplicateDone.countDown(); - }); - } - for (Thread t : addDuplicateThreads) { - t.start(); - } - try { - boolean doneOneTime = addDuplicateDone.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - Assertions.fail(); - } - /* - Checking correctness of insertion by Has again. 
- */ - CountDownLatch hasDone2 = new CountDownLatch(concurrencyDegree); - Thread[] hasThreads2 = new Thread[concurrencyDegree]; - for (int i = 0; i < allEntities.size(); i++) { - int finalI = i; - hasThreads2[i] = new Thread(() -> { - if (!db.has((allEntities.get(finalI)).id())) { - threadError.getAndIncrement(); - } - hasDone2.countDown(); - }); - } - - for (Thread t : hasThreads2) { - t.start(); - } - try { - boolean doneOneTime = hasDone2.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - Assertions.fail(); - } - /* - Checking correctness of insertion by Get again. - */ - CountDownLatch getDone2 = new CountDownLatch(concurrencyDegree); - Thread[] getThreads2 = new Thread[concurrencyDegree]; - for (int i = 0; i < allEntities.size(); i++) { - int finalI = i; - getThreads2[i] = new Thread(() -> { - if (!allEntities.contains(db.get(allEntities.get(finalI).id()))) { - threadError.getAndIncrement(); - } - getDone2.countDown(); - }); - } - - for (Thread t : getThreads2) { - t.start(); - } - try { - boolean doneOneTime = getDone2.await(60, TimeUnit.SECONDS); - Assertions.assertTrue(doneOneTime); - } catch (InterruptedException e) { - Assertions.fail(); - } Assertions.assertEquals(0, threadError.get()); - db.closeDb(); - try { - FileUtils.deleteDirectory(new File(tempdir.toString())); - } catch (IOException e) { - throw new IOException("could not delete directory"); - } } - } From 9d294ca9bfc4687b19c4853640cc53449d7cbbc2 Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Sun, 22 May 2022 11:15:31 +0300 Subject: [PATCH 29/31] apply revision for tests --- src/test/java/storage/DistributedMapDbTest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/test/java/storage/DistributedMapDbTest.java b/src/test/java/storage/DistributedMapDbTest.java index 185e0e3c..c527c4d5 100644 --- a/src/test/java/storage/DistributedMapDbTest.java +++ b/src/test/java/storage/DistributedMapDbTest.java @@ -57,7 +57,6 @@ void cleanup() 
throws IOException { FileUtils.deleteDirectory(new File(tempdir.toString())); } - /** * When adding 20 new entities of different types (10 transactions and 10 blocks) sequentially, * the Add method must return true for all of them. Moreover, after @@ -195,7 +194,6 @@ void concurrentRemoveFirstTenTest() { this.checkForAllConcurrently(10); } - /** * Removes entities from blocks storage database till the given index concurrently. * From e188aaf0a815a4a270b2726ad8064fc6c0d4b0ec Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Sun, 22 May 2022 11:39:45 +0300 Subject: [PATCH 30/31] exception handling fixed --- src/main/java/storage/Distributed.java | 9 +- .../java/storage/mapdb/DistributedMapDb.java | 23 +++-- .../java/storage/DistributedMapDbTest.java | 85 ++++++------------- 3 files changed, 47 insertions(+), 70 deletions(-) diff --git a/src/main/java/storage/Distributed.java b/src/main/java/storage/Distributed.java index b4440251..02487be9 100644 --- a/src/main/java/storage/Distributed.java +++ b/src/main/java/storage/Distributed.java @@ -3,6 +3,7 @@ import java.util.ArrayList; import model.Entity; +import model.exceptions.CodecException; import model.lightchain.Identifier; /** @@ -25,7 +26,7 @@ public interface Distributed { * @return true if entity did not exist on the database, false if entity is already in * database. */ - boolean add(Entity e); + boolean add(Entity e) throws CodecException; /** * Removes entity with given identifier. @@ -34,7 +35,7 @@ public interface Distributed { * @return true if entity exists on database and removed successfully, false if entity does not exist on * database. */ - boolean remove(Entity e); + boolean remove(Entity e) throws CodecException; /** * Returns the entity with given identifier. @@ -42,12 +43,12 @@ public interface Distributed { * @param e identifier of the entity. * @return the entity itself if exists and null otherwise. 
*/ - Entity get(Identifier e); + Entity get(Identifier e) throws CodecException; /** * Returns all entities stored in database. * * @return all stored entities in database. */ - ArrayList all(); + ArrayList all() throws CodecException; } diff --git a/src/main/java/storage/mapdb/DistributedMapDb.java b/src/main/java/storage/mapdb/DistributedMapDb.java index 511fb596..e4301985 100644 --- a/src/main/java/storage/mapdb/DistributedMapDb.java +++ b/src/main/java/storage/mapdb/DistributedMapDb.java @@ -5,6 +5,7 @@ import model.Entity; import model.codec.EncodedEntity; +import model.exceptions.CodecException; import model.lightchain.Identifier; import modules.codec.JsonEncoder; import org.mapdb.DB; @@ -59,12 +60,14 @@ public boolean has(Identifier entityId) { * database. */ @Override - public boolean add(Entity e) { + public boolean add(Entity e) throws CodecException { JsonEncoder encoder = new JsonEncoder(); boolean addBoolean; try { lock.writeLock().lock(); addBoolean = distributedMap.putIfAbsentBoolean(e.id().getBytes(), encoder.encode(e)); + } catch (CodecException ex) { + throw new CodecException("could not encode the entity", ex); } finally { lock.writeLock().unlock(); } @@ -79,12 +82,14 @@ public boolean add(Entity e) { * database. */ @Override - public boolean remove(Entity e) { + public boolean remove(Entity e) throws CodecException { JsonEncoder encoder = new JsonEncoder(); boolean removeBoolean; try { lock.writeLock().lock(); removeBoolean = distributedMap.remove(e.id().getBytes(), encoder.encode(e)); + } catch (CodecException exception) { + throw new CodecException("could not encode entity", exception); } finally { lock.writeLock().unlock(); } @@ -98,9 +103,9 @@ public boolean remove(Entity e) { * @return the entity itself if exists and null otherwise. 
*/ @Override - public Entity get(Identifier entityId) { + public Entity get(Identifier entityId) throws CodecException { - Entity decodedEntity = null; + Entity decodedEntity; try { JsonEncoder encoder = new JsonEncoder(); @@ -110,8 +115,8 @@ public Entity get(Identifier entityId) { return null; } decodedEntity = encoder.decode(encodedEntity); - } catch (ClassNotFoundException e) { - //throw new ClassNotFoundException("could not found the class"+e); + } catch (CodecException e) { + throw new CodecException("could not found the class", e); } finally { lock.readLock().unlock(); } @@ -124,14 +129,14 @@ public Entity get(Identifier entityId) { * @return all stored entities in database. */ @Override - public ArrayList all() { + public ArrayList all() throws CodecException { JsonEncoder encoder = new JsonEncoder(); ArrayList allEntities = new ArrayList<>(); for (Object encodedEntity : distributedMap.values()) { try { allEntities.add(encoder.decode((EncodedEntity) encodedEntity)); - } catch (ClassNotFoundException e) { - //throw new ClassNotFoundException("could not found the class"+e); + } catch (CodecException e) { + throw new CodecException("could not found the class", e); } } return allEntities; diff --git a/src/test/java/storage/DistributedMapDbTest.java b/src/test/java/storage/DistributedMapDbTest.java index c527c4d5..7583405d 100644 --- a/src/test/java/storage/DistributedMapDbTest.java +++ b/src/test/java/storage/DistributedMapDbTest.java @@ -11,6 +11,7 @@ import java.util.concurrent.atomic.AtomicInteger; import model.Entity; +import model.exceptions.CodecException; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; @@ -68,7 +69,7 @@ void cleanup() throws IOException { * @throws IOException throw IOException. 
*/ @Test - void sequentialAddTest() throws IOException { + void sequentialAddTest() throws IOException, CodecException { for (Entity entity : allEntities) { Assertions.assertTrue(db.add(entity)); } @@ -98,22 +99,13 @@ void sequentialAddTest() throws IOException { * After adding all entities * are done, each entity must be retrievable using both its id (get). Also, when * querying All method, list of all 20 entities must be returned. - * */ @Test - void concurrentAddTest() { - /* - Adding all blocks concurrently. - */ + void concurrentAddTest() throws CodecException { this.addAllEntitiesConcurrently(true); - - /* - All blocks should be retrievable - */ this.checkForHasConcurrently(0); this.checkForGetConcurrently(0); this.checkForAllConcurrently(0); - } /** @@ -131,7 +123,7 @@ void concurrentAddTest() { * @throws IOException for any unhappy path. */ @Test - void removeFirstTenTest() throws IOException { + void removeFirstTenTest() throws IOException, CodecException { for (Entity entity : allEntities) { Assertions.assertTrue(db.add(entity)); } @@ -164,31 +156,17 @@ void removeFirstTenTest() throws IOException { * and get should return null. But for the last 5 blocks and 5 transactions, has should return true, and get * should successfully retrieve the exact entity. * Also, All should return only the last 5 blocks and 5 transactions. - * */ @Test - void concurrentRemoveFirstTenTest() { - /* - Adding all entities concurrently. - */ + void concurrentRemoveFirstTenTest() throws CodecException { this.addAllEntitiesConcurrently(true); - /* - All entities should be retrievable. - */ this.checkForGetConcurrently(0); this.checkForHasConcurrently(0); this.checkForAllConcurrently(0); - /* - Removing first 10 concurrently - */ this.removeEntityTill(10); - /* - first five blocks must not be retrievable, - the rest must be. 
- */ this.checkForGetConcurrently(10); this.checkForHasConcurrently(10); this.checkForAllConcurrently(10); @@ -206,7 +184,11 @@ private void removeEntityTill(int till) { for (int i = 0; i < till; i++) { int finalI = i; removeThreads[i] = new Thread(() -> { - if (!db.remove(allEntities.get(finalI))) { + try { + if (!db.remove(allEntities.get(finalI))) { + threadError.getAndIncrement(); + } + } catch (CodecException e) { threadError.getAndIncrement(); } doneRemove.countDown(); @@ -237,7 +219,7 @@ private void removeEntityTill(int till) { * @throws IOException for any unhappy path. */ @Test - void duplicationTest() throws IOException { + void duplicationTest() throws IOException, CodecException { for (Entity entity : allEntities) { Assertions.assertTrue(db.add(entity)); } @@ -279,27 +261,17 @@ void duplicationTest() throws IOException { * entity is retrievable using its identifier (get). Then try Adding all of them again, and * Add should return false for each of them, while has should still return true, and get should be * able to retrieve the entity. - * */ @Test - void concurrentDuplicationTest() { - /* - Adding all entities concurrently. - */ + void concurrentDuplicationTest() throws CodecException { this.addAllEntitiesConcurrently(true); - /* - All entities should be retrievable using their id or height. - */ + this.checkForGetConcurrently(0); this.checkForHasConcurrently(0); this.checkForAllConcurrently(0); - /* - Adding all entities again concurrently, all should fail due to duplication. - */ + this.addAllEntitiesConcurrently(false); - /* - Again, all entities should be retrievable using their id or height. 
- */ + this.checkForGetConcurrently(0); this.checkForHasConcurrently(0); this.checkForAllConcurrently(0); @@ -315,13 +287,15 @@ private void addAllEntitiesConcurrently(boolean expectedResult) { AtomicInteger threadError = new AtomicInteger(); CountDownLatch addDone = new CountDownLatch(allEntities.size()); Thread[] addThreads = new Thread[allEntities.size()]; - /* - Adding all blocks concurrently. - */ + for (int i = 0; i < allEntities.size(); i++) { int finalI = i; addThreads[i] = new Thread(() -> { - if (db.add(allEntities.get(finalI)) != expectedResult) { + try { + if (db.add(allEntities.get(finalI)) != expectedResult) { + threadError.getAndIncrement(); + } + } catch (CodecException e) { threadError.getAndIncrement(); } addDone.countDown(); @@ -355,12 +329,10 @@ private void checkForHasConcurrently(int from) { hasThreads[i] = new Thread(() -> { if (finalI < from) { - // blocks should not exist if (this.db.has(entity.id())) { threadError.incrementAndGet(); } } else { - // block should exist if (!this.db.has(entity.id())) { threadError.getAndIncrement(); } @@ -369,7 +341,6 @@ private void checkForHasConcurrently(int from) { hasDone.countDown(); }); } - for (Thread t : hasThreads) { t.start(); } @@ -396,14 +367,17 @@ private void checkForGetConcurrently(int from) { int finalI = i; Entity entity = allEntities.get(i); getThreads[i] = new Thread(() -> { - Entity got = db.get(entity.id()); + Entity got = null; + try { + got = db.get(entity.id()); + } catch (CodecException e) { + threadError.incrementAndGet(); + } if (finalI < from) { - // blocks should not exist if (got != null) { threadError.incrementAndGet(); } } else { - // block should exist if (!entity.equals(got)) { threadError.getAndIncrement(); } @@ -411,7 +385,6 @@ private void checkForGetConcurrently(int from) { threadError.getAndIncrement(); } } - getDone.countDown(); }); } @@ -433,7 +406,7 @@ private void checkForGetConcurrently(int from) { * * @param from inclusive index of the first transaction to check. 
*/ - private void checkForAllConcurrently(int from) { + private void checkForAllConcurrently(int from) throws CodecException { AtomicInteger threadError = new AtomicInteger(); CountDownLatch doneAll = new CountDownLatch(allEntities.size()); Thread[] allThreads = new Thread[allEntities.size()]; @@ -443,12 +416,10 @@ private void checkForAllConcurrently(int from) { final Entity entity = allEntities.get(i); allThreads[i] = new Thread(() -> { if (finalI < from) { - // blocks should not exist if (all.contains(entity)) { threadError.incrementAndGet(); } } else { - // block should exist if (!all.contains(entity)) { threadError.getAndIncrement(); } From 5406ee1fc163c9144ec071a515724a2761d2c42b Mon Sep 17 00:00:00 2001 From: akucukoduk16 Date: Mon, 23 May 2022 16:41:43 +0300 Subject: [PATCH 31/31] style fixed --- src/test/java/storage/DistributedMapDbTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/storage/DistributedMapDbTest.java b/src/test/java/storage/DistributedMapDbTest.java index 7583405d..1ea0c05a 100644 --- a/src/test/java/storage/DistributedMapDbTest.java +++ b/src/test/java/storage/DistributedMapDbTest.java @@ -173,7 +173,7 @@ void concurrentRemoveFirstTenTest() throws CodecException { } /** - * Removes entities from blocks storage database till the given index concurrently. + * Removes entities from distributed storage database till the given index concurrently. * * @param till exclusive index of the last entity being removed. */