From 80526264fbebe8ab790458ee29ac4309ba5ce580 Mon Sep 17 00:00:00 2001 From: Flurina Fischer Date: Sat, 9 Dec 2023 14:33:55 +0100 Subject: [PATCH] threaded simple filecreation and writing values into them works --- .../polypheny/db/backup/BackupManager.java | 4 +- .../db/backup/datagatherer/ExecuteQuery.java | 270 ++++++++++++++++++ .../db/backup/datagatherer/GatherEntries.java | 237 ++++++++++++++- 3 files changed, 496 insertions(+), 15 deletions(-) create mode 100644 dbms/src/main/java/org/polypheny/db/backup/datagatherer/ExecuteQuery.java diff --git a/dbms/src/main/java/org/polypheny/db/backup/BackupManager.java b/dbms/src/main/java/org/polypheny/db/backup/BackupManager.java index d46ded122d..be6eb7f0e7 100644 --- a/dbms/src/main/java/org/polypheny/db/backup/BackupManager.java +++ b/dbms/src/main/java/org/polypheny/db/backup/BackupManager.java @@ -17,6 +17,7 @@ package org.polypheny.db.backup; import com.google.common.collect.ImmutableMap; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -49,7 +50,8 @@ public class BackupManager { @Getter private BackupInformationObject backupInformationObject; public static TransactionManager transactionManager = null; - public static int batchSize = 1; //#rows (100 for the beginning) + public static int batchSize = 2; //#rows (100 for the beginning) + public static int threadNumber = 8; //#cores (#cpu's) for now //private final Logger logger; diff --git a/dbms/src/main/java/org/polypheny/db/backup/datagatherer/ExecuteQuery.java b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/ExecuteQuery.java new file mode 100644 index 0000000000..0f714bc406 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/ExecuteQuery.java @@ -0,0 +1,270 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup.datagatherer; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; +import java.util.List; +import lombok.extern.slf4j.Slf4j; +import org.polypheny.db.PolyImplementation; +import org.polypheny.db.ResultIterator; +import org.polypheny.db.backup.BackupManager; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; +import org.polypheny.db.catalog.logistic.DataModel; +import org.polypheny.db.languages.LanguageManager; +import org.polypheny.db.languages.QueryLanguage; +import org.polypheny.db.processing.ImplementationContext.ExecutedContext; +import org.polypheny.db.processing.QueryContext; +import org.polypheny.db.transaction.Statement; +import org.polypheny.db.transaction.Transaction; +import org.polypheny.db.transaction.TransactionManager; +import org.polypheny.db.type.entity.PolyValue; + +@Slf4j +public class ExecuteQuery implements Runnable { + private TransactionManager transactionManager; + private String query; + private DataModel dataModel; + private long namespaceId; + private File dataFile; + + public ExecuteQuery( TransactionManager transactionManager, String query, DataModel dataModel, long namespaceId, File dataFile ) { + this.transactionManager = transactionManager; //TODO(FF): is transactionmanager thread safe to pass it like this?? + this.query = query; + this.dataModel = dataModel; + this.namespaceId = namespaceId; + this.dataFile = dataFile; + } + + + @Override + public void run() { + log.debug( "thread for gather entries entered" ); + Transaction transaction; + Statement statement = null; + PolyImplementation result; + + + switch ( dataModel ) { + case RELATIONAL: + //fileChannel (is blocking... does it matter?) 
or + // DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(dataFile))); + + /* + //fileChannel way (randomaccessfile, nio) + try( + //DataOutputStream out = new DataOutputStream( new BufferedOutputStream( new FileOutputStream( dataFile ) ) ); //channel doesn't work with this + RandomAccessFile writer = new RandomAccessFile( dataFile, "rw" ); + FileChannel channel = writer.getChannel(); + + //method2 + FileOutputStream fos = new FileOutputStream( dataFile ); + FileChannel channel1 = fos.getChannel(); + + ) { + + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Gatherer" ); + statement = transaction.createStatement(); + //TODO(FF): be aware for writing into file with batches that you dont overwrite the entries already in the file (evtl you need to read the whole file again + ExecutedContext executedQuery = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "sql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).batch( BackupManager.batchSize ).namespaceId( namespaceId ).build(), statement ).get( 0 ); + ExecutedContext executedQuery1 = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "sql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).namespaceId( Catalog.defaultNamespaceId ).build(), statement ).get( 0 ); + // in case of results + ResultIterator iter = executedQuery.getIterator(); + while ( iter.hasMoreRows() ) { + // liste mit tuples + List> resultsPerTable = iter.getNextBatch(); + log.info( resultsPerTable.toString() ); + //FIXME(FF): if this is array: [[1, PolyList(value=[PolyList(value=[PolyList(value=[PolyBigDecimal(value=111), PolyBigDecimal(value=112)]), PolyList(value=[PolyBigDecimal(value=121), PolyBigDecimal(value=122)])]), PolyList(value=[PolyList(value=[PolyBigDecimal(value=211), PolyBigDecimal(value=212)]), PolyList(value=[PolyBigDecimal(value=221), PolyBigDecimal(value=222)])])])]] + //value is shown correctly for tojson + + for ( List row : resultsPerTable ) { + for ( PolyValue polyValue : row ) { + String byteString = polyValue.serialize(); + //byte[] byteString2 = polyValue.serialize().getBytes(StandardCharsets.UTF_8); + String jsonString = polyValue.toTypedJson(); + + ByteBuffer buff = ByteBuffer.wrap(byteString.getBytes( StandardCharsets.UTF_8)); + channel.write( buff ); + + + //larger, testing easier, replace later + PolyValue deserialized = PolyValue.deserialize( byteString ); + PolyValue deserialized2 = PolyValue.fromTypedJson( jsonString, PolyValue.class ); + int jhg=87; + } + } + + // flush only batchwise? is this even possible? does it make sense? 
+ + } + + } catch(Exception e){ + throw new GenericRuntimeException( "Error while starting transaction", e ); + } + + */ + + + // bufferedOutputStream, io way + try( + DataOutputStream out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile), 32768)); + //DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(dataFile))); + + //String result = in.readUTF(); + //in.close(); + + ) { + + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Gatherer" ); + statement = transaction.createStatement(); + //TODO(FF): be aware for writing into file with batches that you dont overwrite the entries already in the file (evtl you need to read the whole file again + ExecutedContext executedQuery = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "sql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).batch( BackupManager.batchSize ).namespaceId( namespaceId ).build(), statement ).get( 0 ); + ExecutedContext executedQuery1 = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "sql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).namespaceId( Catalog.defaultNamespaceId ).build(), statement ).get( 0 ); + // in case of results + ResultIterator iter = executedQuery.getIterator(); + while ( iter.hasMoreRows() ) { + // liste mit tuples + List> resultsPerTable = iter.getNextBatch(); + log.info( resultsPerTable.toString() ); + //FIXME(FF): if this is array: [[1, PolyList(value=[PolyList(value=[PolyList(value=[PolyBigDecimal(value=111), PolyBigDecimal(value=112)]), PolyList(value=[PolyBigDecimal(value=121), PolyBigDecimal(value=122)])]), PolyList(value=[PolyList(value=[PolyBigDecimal(value=211), PolyBigDecimal(value=212)]), PolyList(value=[PolyBigDecimal(value=221), PolyBigDecimal(value=222)])])])]] + //value is shown correctly for tojson + + for ( List row : resultsPerTable ) { + for ( PolyValue polyValue : row ) { + String byteString = polyValue.serialize(); + byte[] byteBytes = polyValue.serialize().getBytes(StandardCharsets.UTF_8); + String jsonString = polyValue.toTypedJson(); + + //out.write( byteBytes ); + //out.write( byteString.getBytes( StandardCharsets.UTF_8 ) ); + out.writeChars( jsonString ); + + + //larger, testing easier, replace later + PolyValue deserialized = PolyValue.deserialize( byteString ); + PolyValue deserialized2 = PolyValue.fromTypedJson( jsonString, PolyValue.class ); + int jhg=87; + } + } + + // flush only batchwise? is this even possible? does it make sense? 
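+                            // A sketch answering the question above (an assumption, not yet wired in): flushing
+                            // batch-wise is possible - calling out.flush() here, once per ResultIterator batch
+                            // and after the row loop, pushes the BufferedOutputStream buffer to disk.
+                            // Separately, writeChars( jsonString ) emits raw UTF-16 code units with no length
+                            // prefix or delimiter, so the file cannot be split back into individual values on
+                            // restore. DataOutputStream.writeUTF( jsonString ) length-prefixes each string
+                            // (modified UTF-8, at most 65535 bytes per string) and can be read back with
+                            // DataInputStream.readUTF(); for larger values a manual framing such as
+                            //     out.writeInt( bytes.length ); out.write( bytes );
+                            // avoids that limit. Regarding the overwrite TODO: as long as this single
+                            // DataOutputStream stays open for the whole iteration, batches are simply appended;
+                            // re-reading the file would only be needed if it were reopened per batch (and even
+                            // then, new FileOutputStream( dataFile, true ) opens in append mode).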
+ + } + + } catch(Exception e){ + throw new GenericRuntimeException( "Error while starting transaction", e ); + } + + + + + /* + try { + // get a transaction and a statement + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Gatherer" ); + statement = transaction.createStatement(); + //TODO(FF): be aware for writing into file with batches that you dont overwrite the entries already in the file (evtl you need to read the whole file again + ExecutedContext executedQuery = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "sql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).batch( BackupManager.batchSize ).namespaceId( namespaceId ).build(), statement ).get( 0 ); + ExecutedContext executedQuery1 = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "sql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).namespaceId( Catalog.defaultNamespaceId ).build(), statement ).get( 0 ); + // in case of results + ResultIterator iter = executedQuery.getIterator(); + while ( iter.hasMoreRows() ) { + // liste mit tuples + List> resultsPerTable = iter.getNextBatch(); + log.info( resultsPerTable.toString() ); + //FIXME(FF): if this is array: [[1, PolyList(value=[PolyList(value=[PolyList(value=[PolyBigDecimal(value=111), PolyBigDecimal(value=112)]), PolyList(value=[PolyBigDecimal(value=121), PolyBigDecimal(value=122)])]), PolyList(value=[PolyList(value=[PolyBigDecimal(value=211), PolyBigDecimal(value=212)]), PolyList(value=[PolyBigDecimal(value=221), PolyBigDecimal(value=222)])])])]] + //value is shown correctly for tojson + + for ( List row : resultsPerTable ) { + for ( PolyValue polyValue : row ) { + String test = polyValue.serialize(); + String jsonString = polyValue.toTypedJson(); //larger, testing easier, replace later + PolyValue deserialized = PolyValue.deserialize( test ); + PolyValue deserialized2 = PolyValue.fromTypedJson( jsonString, PolyValue.class ); // gives nullpointerexception + int jhg=87; + } + } + + } + + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error while starting transaction", e ); + } + + */ + break; + + case DOCUMENT: + try(DataOutputStream out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile), 32768));) + { + // get a transaction and a statement + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Gatherer" ); + statement = transaction.createStatement(); + ExecutedContext executedQuery = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "mql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).namespaceId( namespaceId ).build(), statement ).get( 0 ); + + ResultIterator iter = executedQuery.getIterator(); + while ( iter.hasMoreRows() ) { + // liste mit tuples + List> resultsPerCollection = iter.getNextBatch(); + out.writeChars( resultsPerCollection.toString() ); + log.info( resultsPerCollection.toString() ); + } + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error while starting transaction", e ); + } + break; + + case GRAPH: + try(DataOutputStream out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile), 32768));) + { + // get a transaction and a statement + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Gatherer" ); + 
statement = transaction.createStatement(); + ExecutedContext executedQuery = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "cypher" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).namespaceId( namespaceId ).build(), statement ).get( 0 ); + + ResultIterator iter = executedQuery.getIterator(); + while ( iter.hasMoreRows() ) { + // liste mit tuples + List> graphPerNamespace = iter.getNextBatch(); + log.info( graphPerNamespace.toString() ); + } + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error while starting transaction", e ); + } + break; + + default: + throw new GenericRuntimeException( "Backup - GatherEntries: DataModel not supported" ); + } + + } + + private void createFile(String path) { + + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntries.java b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntries.java index 9286fbb7d7..2276f0b6e2 100644 --- a/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntries.java +++ b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntries.java @@ -16,13 +16,33 @@ package org.polypheny.db.backup.datagatherer; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.BufferedWriter; +import java.io.DataInputStream; +import java.io.DataOutputStream; import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.Date; import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import lombok.Getter; +import lombok.Setter; import lombok.extern.slf4j.Slf4j; import org.polypheny.db.PolyImplementation; import org.polypheny.db.ResultIterator; import org.polypheny.db.backup.BackupManager; import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; import org.polypheny.db.catalog.logistic.DataModel; import org.polypheny.db.languages.LanguageManager; import org.polypheny.db.languages.QueryLanguage; @@ -41,6 +61,11 @@ public class GatherEntries { private final List> tablesToBeCollected; private final List> collectionsToBeCollected; private final List graphNamespaceIds; + //private final int = hal.getProcessor().getPhysicalProcessorCount(); + + private File backupFolder = null; + @Getter + private File dataFolder = null; @@ -57,12 +82,21 @@ public GatherEntries( TransactionManager transactionManager, List table : tablesToBeCollected) { //TODO(FF): exclude default columns? 
no, how do you differentiate for each line if it is not a default value String query = String.format( "SELECT * FROM %s.%s" , table.getKey(), table.getValue() ); - executeQuery( query, DataModel.RELATIONAL, Catalog.defaultNamespaceId ); + //executeQuery2( query, DataModel.RELATIONAL, Catalog.defaultNamespaceId ); + + File tableData = fileSystemManager.registerNewFile( getDataFolder(), String.format( "tab_%s_%s.txt", table.getKey() , table.getValue() )); + executorService.submit( new ExecuteQuery( transactionManager, query, DataModel.RELATIONAL, Catalog.defaultNamespaceId , tableData) ); } /* for ( String nsTableName : tablesToBeCollected ) { @@ -74,7 +108,9 @@ public void start() { if (!collectionsToBeCollected.isEmpty()){ for ( Pair collection : collectionsToBeCollected ) { String query = String.format( "db.%s.find()", collection.getValue() ); - executeQuery( query, DataModel.DOCUMENT, collection.getKey() ); + //executeQuery2( query, DataModel.DOCUMENT, collection.getKey() ); + File collectionData = fileSystemManager.registerNewFile( getDataFolder(), String.format( "col_%s.txt", collection.getValue() )); + executorService.submit( new ExecuteQuery( transactionManager, query, DataModel.DOCUMENT, collection.getKey(), collectionData ) ); } } @@ -82,19 +118,22 @@ public void start() { for ( Long graphNamespaceId : graphNamespaceIds ) { //String query = "MATCH (n) RETURN n"; String query = "MATCH (*) RETURN n"; //todo: result is polygraph - executeQuery( query, DataModel.GRAPH, graphNamespaceId ); + //executeQuery2( query, DataModel.GRAPH, graphNamespaceId ); + File graphData = fileSystemManager.registerNewFile( getDataFolder(), String.format( "graph_%s.txt", graphNamespaceId.toString() )); + executorService.submit( new ExecuteQuery( transactionManager, query, DataModel.GRAPH, graphNamespaceId, graphData ) ); } } log.info( "collected entry data" ); - initializeFileLocation(); + //initializeFileLocation(); log.info( "folder was created" ); + executorService.shutdown(); } // Gather entries with select statements - private void executeQuery( String query, DataModel dataModel, long namespaceId ) { + private void executeQuery2( String query, DataModel dataModel, long namespaceId ) { log.debug( "gather entries" ); Transaction transaction; @@ -107,6 +146,11 @@ private void executeQuery( String query, DataModel dataModel, long namespaceId ) 09:50:10.578 INFO [JettyServerThreadPool-32]: [[{email:jane@example.com,name:Jane Doe,_id:6570348c4023777b64ff8be8}], [{email:jim@example.com,name:Jim Doe,_id:6570348c4023777b64ff8be9}]] 09:50:12.880 INFO [JettyServerThreadPool-32]: [[PolyNode{id=5823e305-17bb-4fb7-bd17-2108a91acb70, properties=PolyMap(map={age=45, name=Ann, depno=13}), labels=PolyList(value=[Person])}], [PolyNode{id=e8772eff-10ab-4436-a693-9e3f2f0af6d2, properties=PolyMap(map={age=30, name=John, depno=13}), labels=PolyList(value=[Person2])}]] + + if the batchsize is 1: this is printed + 17:32:48.142 INFO [JettyServerThreadPool-418]: [[1, Best Album Ever!, 10]] + 17:33:53.853 INFO [JettyServerThreadPool-418]: [[2, Pretty Decent Album..., 15]] + 17:33:53.858 INFO [JettyServerThreadPool-418]: [[3, Your Ears will Bleed!, 13]] */ switch ( dataModel ) { @@ -208,22 +252,187 @@ private void executeQuery( String query, DataModel dataModel, long namespaceId ) */ } + private static void initFileTest() { + // this creates (only folders): dataa>cottontaildb-store>store23>dataIntern + PolyphenyHomeDirManager fileSystemManager = PolyphenyHomeDirManager.getInstance(); + File adapterRoot = 
fileSystemManager.registerNewFolder( "dataaa/cottontaildb-store" ); + + File embeddedDir = fileSystemManager.registerNewFolder( adapterRoot, "store" + 23 ); + File testFolder = fileSystemManager.registerNewFolder( "dataaa/cottontaildb-store/test" ); // works too + + final File dataFolder = fileSystemManager.registerNewFolder( embeddedDir, "dataIntern" ); + + } + private static void initializeFileLocation() { PolyphenyHomeDirManager homeDirManager = PolyphenyHomeDirManager.getInstance(); - File applicationConfDir = null; //todo: wär eig class field (private static) - // String currentConfigurationDirectoryName = DEFAULT_CONFIGURATION_DIRECTORY_NAME; //static string in class field - String currentConfigurationDirectoryName = "backup"; - String currentConfigurationFileName = "backup.bu"; - File applicationConfFile = homeDirManager.registerNewFolder( currentConfigurationDirectoryName ); //there is complicated thing in ConfigManager>loadConfigFile() + File folder = null; //todo: wär eig class field (private static) + // String folderName = DEFAULT_CONFIGURATION_DIRECTORY_NAME; //static string in class field + Date date = new java.util.Date(); + String datum = date.toString(); + String folderName = "backup"; + String fileName = "backup.txt"; + File file = homeDirManager.registerNewFolder( folderName ); //there is complicated thing in ConfigManager>loadConfigFile() // Create config directory and file if they do not already exist //PolyphenyHomeDirManager homeDirManager = PolyphenyHomeDirManager.getInstance(); - if ( applicationConfDir == null ) { - applicationConfDir = homeDirManager.registerNewFolder( currentConfigurationDirectoryName ); + + + if ( folder == null ) { + folder = homeDirManager.registerNewFolder( folderName ); } else { - applicationConfDir = homeDirManager.registerNewFolder( applicationConfDir.getParentFile(), currentConfigurationDirectoryName ); + folder = homeDirManager.registerNewFolder( folder.getParentFile(), folderName ); + } + file = homeDirManager.registerNewFile( folder, fileName ); + + + + + + // For a large amount of data, we will require a better raw performance. + // In this case, buffered methods like BufferedWriter and Files.write() can offer improved efficiency. + // Use FileChannel to write larger files. It is the preferred way of writing files in Java 8 as well. + // https://howtodoinjava.com/java/io/java-write-to-file/ + + + + + // this is apparently also an option; + try { + String str = "lisjlk"; + byte[] strToBytes = str.getBytes(); + + Files.write( file.toPath(), strToBytes); + + String read = Files.readAllLines(file.toPath()).get(0); + } catch ( Exception e ) { + throw new RuntimeException( e ); + } + + // apparently this is slower, no buffered is okee + try { + BufferedWriter writer = new BufferedWriter( new FileWriter( file ), 32768 ); + writer.write( "test2" ); + writer.close(); + } catch ( IOException e ) { + throw new GenericRuntimeException( e ); + } + + + //this is apparently faster, same as above (??) but for raw data + try { + //bufferedOutputStream?? + FileInputStream fis = new FileInputStream(new File("in.txt")); + FileOutputStream fos = new FileOutputStream(new File("out.txt")); + byte[] buffer = new byte[1024]; + int len; + while((len = fis.read(buffer)) != -1){ + fos.write(buffer, 0, len); + } + fos.close(); + fis.close(); + } catch ( IOException e ) { + throw new GenericRuntimeException( e ); + } + + //apparently this is superfast? 
https://stackoverflow.com/questions/8109762/dataoutputstream-vs-dataoutputstreamnew-bufferedoutputstream + try { + File dataFile = null; + FileOutputStream fos = new FileOutputStream(dataFile); + DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos)); + DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(dataFile))); + + String str = "hello world"; + byte[] strToBytes = str.getBytes(); + out.write( strToBytes ); + + + out.writeUTF("test"); + out.close(); + String result = in.readUTF(); + in.close(); + + } catch ( Exception e ) { + throw new RuntimeException( e ); } - applicationConfFile = homeDirManager.registerNewFile( applicationConfDir, currentConfigurationFileName ); + + + //https://stackoverflow.com/questions/1605332/java-nio-filechannel-versus-fileoutputstream-performance-usefulness + //randomAccessFile: lets you start writing from specific point in file (after byte offset)... filechannel is from nio, apparently faster with large amount of data (+buffer) + try { + RandomAccessFile stream = new RandomAccessFile(fileName, "rw"); + FileChannel channel = stream.getChannel(); + String value = "Hello"; + byte[] strBytes = value.getBytes(); + ByteBuffer buffer = ByteBuffer.allocate(strBytes.length); + buffer.put(strBytes); + buffer.flip(); + channel.write(buffer); + stream.close(); + channel.close(); + + // verify + RandomAccessFile reader = new RandomAccessFile(fileName, "r"); + //assertEquals(value, reader.readLine()); + reader.close(); + } catch ( Exception e ) { + throw new RuntimeException( e ); + } + + } + + private void speedTest() { + char[] chars = new char[100*1024*1024]; + Arrays.fill(chars, 'A'); + String text = new String(chars); + long start = System.nanoTime(); + + try { + BufferedWriter bw = new BufferedWriter(new FileWriter("/tmp/a.txt")); + bw.write(text); + bw.close(); + } catch ( IOException e ) { + throw new RuntimeException( e ); + } + + long time = System.nanoTime() - start; + log.info("Wrote " + chars.length*1000L/time+" MB/s."); + // stackoverflow dude: Wrote 135 MB/s + } + + + //from: https://www.devinline.com/2013/09/write-to-file-in-java.html + // With buffered input:- Read input file and write to output file + /* + public static void writeBinaryStreamEfficient(File outputFile, File inputFile) { + int byteCoint; + Long starttime = System.currentTimeMillis(); + try { + FileInputStream is = new FileInputStream(inputFile); + // Buffered input stream and loop over buffered result + BufferedInputStream bis = new BufferedInputStream(is); + + FileOutputStream os = new FileOutputStream(outputFile); + BufferedOutputStream bos = new BufferedOutputStream(os); + while ((byteCoint = bis.read()) != -1) { + bos.write(byteCoint); + } + +//Closes this file input/output stream and releases any system resources associated with the stream. + is.close(); + os.close(); + + } catch (IOException e) { + e.printStackTrace(); + } + System.out.println("Total time spent in writing " + + "with buffered input is (in millisec) " + + (System.currentTimeMillis() - starttime)); + + } + + */ + }
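Note on the threading in GatherEntries.start() (a hedged sketch, not part of this patch): start()
submits one ExecuteQuery task per table, collection and graph namespace and then calls
executorService.shutdown(). shutdown() only stops the pool from accepting new tasks; it does not
wait for the already submitted gather threads to finish, so the data files may still be incomplete
when start() returns. If a blocking wait is acceptable, the standard java.util.concurrent pattern is:

    executorService.shutdown();
    try {
        // wait for all ExecuteQuery writers to finish; the 60-minute timeout is an arbitrary placeholder
        if ( !executorService.awaitTermination( 60, TimeUnit.MINUTES ) ) {
            executorService.shutdownNow(); // timed out: cancel whatever is still running
        }
    } catch ( InterruptedException e ) {
        executorService.shutdownNow();
        Thread.currentThread().interrupt();
    }

This assumes an additional import of java.util.concurrent.TimeUnit.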