/*******************************************************************************
 * Copyright (c) 2005, 2015 QNX Software Systems and others.
 *
 * This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License 2.0
 * which accompanies this distribution, and is available at
 * https://www.eclipse.org/legal/epl-2.0/
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 * Contributors:
 *     QNX - Initial API and implementation
 *     Symbian - Add some non-javadoc implementation notes
 *     Markus Schorn (Wind River Systems)
 *     IBM Corporation
 *     Sergey Prigogin (Google)
 *******************************************************************************/
package org.eclipse.cdt.internal.core.pdom.db;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.eclipse.cdt.core.CCorePlugin;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.osgi.util.NLS;

import com.ibm.icu.text.MessageFormat;

/**
 * Database encapsulates access to a flat binary format file with a memory-manager-like API for
 * obtaining and releasing areas of storage (memory).
 *
 * @author Doug Schaefer
 */
/*
 * The file encapsulated is divided into Chunks of size CHUNK_SIZE, and a table of contents
 * mapping chunk index to chunk address is maintained. Chunk structure exists only conceptually -
 * it is not a structure that appears in the file.
 *
 * ===== The first chunk is used by Database itself for house-keeping purposes and has the following structure:
 *
 * offset            content
 * 	                 _____________________________
 * 0                | version number
 * INT_SIZE         | pointer to head of linked list of blocks of size MIN_BLOCK_DELTAS*BLOCK_SIZE_DELTA
 * ..               | ...
 * INT_SIZE * m (1) | pointer to head of linked list of blocks of size (m + MIN_BLOCK_DELTAS - 1) * BLOCK_SIZE_DELTA
 * DATA_AREA        | undefined (PDOM stores its own house-keeping data in this area)
 *
 * (1) where 2 <= m <= CHUNK_SIZE / BLOCK_SIZE_DELTA - MIN_BLOCK_DELTAS + 1
 *
 * ===== block structure
 *
 * offset            content
 * 	                 _____________________________
 * 0                | size of block (negative indicates in use, positive unused) (2 bytes)
 * PREV_OFFSET      | pointer to previous block (of same size) (only in free blocks)
 * NEXT_OFFSET      | pointer to next block (of same size) (only in free blocks)
 *
 */
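/*
 * Worked example of the header table above, using the constants defined in the class below
 * (CHUNK_SIZE = 4096, BLOCK_SIZE_DELTA = 8, MIN_BLOCK_DELTAS = 2, INT_SIZE = 4):
 *
 *   offset INT_SIZE * 1 = 4       | head of the free list for 16-byte blocks (2 deltas)
 *   offset INT_SIZE * 2 = 8       | head of the free list for 24-byte blocks (3 deltas)
 *   ...
 *   offset INT_SIZE * 511 = 2044  | head of the free list for 4096-byte blocks (512 deltas)
 *   DATA_AREA = 2048              | first byte available to the PDOM
 */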
public class Database {
	// Public for tests only; you shouldn't need these.
	public static final int INT_SIZE = 4;
	public static final int CHUNK_SIZE = 1024 * 4;
	public static final int OFFSET_IN_CHUNK_MASK = CHUNK_SIZE - 1;
	public static final int BLOCK_HEADER_SIZE = 2;
	public static final int BLOCK_SIZE_DELTA_BITS = 3;
	public static final int BLOCK_SIZE_DELTA = 1 << BLOCK_SIZE_DELTA_BITS;
	public static final int MIN_BLOCK_DELTAS = 2; // A block must be at least 2 + 2*4 bytes to link the free blocks.
	public static final int MAX_BLOCK_DELTAS = CHUNK_SIZE / BLOCK_SIZE_DELTA;
	public static final int MAX_MALLOC_SIZE = MAX_BLOCK_DELTAS * BLOCK_SIZE_DELTA - BLOCK_HEADER_SIZE;
	public static final int PTR_SIZE = 4; // size of a pointer in the database in bytes
	// The lower bound for TYPE_SIZE is 1 + PTR_SIZE, but a slightly larger space for types stored
	// inline results in a slightly smaller overall database size.
	public static final int TYPE_SIZE = 2 + PTR_SIZE; // size of a type in the database in bytes
	public static final int VALUE_SIZE = 1 + PTR_SIZE; // size of a value in the database in bytes
	public static final int EVALUATION_SIZE = TYPE_SIZE; // size of an evaluation in the database in bytes
	public static final int EXECUTION_SIZE = TYPE_SIZE; // size of an execution in the database in bytes
	public static final int ARGUMENT_SIZE = TYPE_SIZE; // size of a template argument in the database in bytes
	public static final long MAX_DB_SIZE = ((long) 1 << (Integer.SIZE + BLOCK_SIZE_DELTA_BITS));
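	// Worked example: MAX_DB_SIZE = 1L << (32 + 3) = 2^35 bytes = 32 GB. Record pointers are
	// stored on disk in 4 bytes; because blocks are aligned to BLOCK_SIZE_DELTA = 8 bytes, the
	// low 3 bits of an address are always zero, so 32 stored bits can address 35 bits (the
	// compression itself is implemented in Chunk, not in this file).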

	public static final int VERSION_OFFSET = 0;
	public static final int DATA_AREA = (CHUNK_SIZE / BLOCK_SIZE_DELTA - MIN_BLOCK_DELTAS + 2) * INT_SIZE;

	private static final int BLOCK_PREV_OFFSET = BLOCK_HEADER_SIZE;
	private static final int BLOCK_NEXT_OFFSET = BLOCK_HEADER_SIZE + INT_SIZE;

	private final File fLocation;
	private final boolean fReadOnly;
	private RandomAccessFile fFile;
	private boolean fExclusiveLock; // Necessary for any write operation.
	private boolean fLocked; // Necessary for any operation.
	private boolean fIsMarkedIncomplete;

	private int fVersion;
	private final Chunk fHeaderChunk;
	private Chunk[] fChunks;
	private int fChunksUsed;
	private int fChunksAllocated;
	private ChunkCache fCache;

	private long malloced;
	private long freed;
	private long cacheHits;
	private long cacheMisses;

	/** Soft reference wrapper to keep track of the record for disposed strings. */
	private static class SoftStringRef extends SoftReference<IString> {
		private final long record;

		public SoftStringRef(IString referent, ReferenceQueue<? super IString> q) {
			super(referent, q);
			record = referent.getRecord();
		}

		public long getRecord() {
			return record;
		}
	}

	// A cache for strings used in btree lookups; soft references let cached strings be garbage collected.
	private final Map<Long, Reference<IString>> stringCache = new ConcurrentHashMap<>();
	private final ReferenceQueue<IString> stringDisposal = new ReferenceQueue<>();

	/**
	 * Construct a new Database object, creating a backing file if necessary.
	 * @param location the local file path for the database
	 * @param cache the cache to be used for optimization
	 * @param version the version number to store in the database (only applicable for new databases)
	 * @param openReadOnly if true, the database is opened read-only and will never be written to
	 * @throws CoreException
	 */
	public Database(File location, ChunkCache cache, int version, boolean openReadOnly) throws CoreException {
		try {
			fLocation = location;
			fReadOnly = openReadOnly;
			fCache = cache;
			openFile();

			int nChunksOnDisk = (int) (fFile.length() / CHUNK_SIZE);
			fHeaderChunk = new Chunk(this, 0);
			fHeaderChunk.fLocked = true; // Never makes it into the cache, needed to satisfy assertions.
			if (nChunksOnDisk <= 0) {
				fVersion = version;
				fChunks = new Chunk[1];
				fChunksUsed = fChunksAllocated = fChunks.length;
			} else {
				fHeaderChunk.read();
				fVersion = fHeaderChunk.getInt(VERSION_OFFSET);
				fChunks = new Chunk[nChunksOnDisk]; // fChunks[0] is unused; the header chunk is kept separately.
				fChunksUsed = fChunksAllocated = nChunksOnDisk;
			}
		} catch (IOException e) {
			throw new CoreException(new DBStatus(e));
		}
	}

	private void openFile() throws FileNotFoundException {
		fFile = new RandomAccessFile(fLocation, fReadOnly ? "r" : "rw"); //$NON-NLS-1$ //$NON-NLS-2$
	}

	void read(ByteBuffer buf, long position) throws IOException {
		int retries = 0;
		do {
			try {
				fFile.getChannel().read(buf, position);
				return;
			} catch (ClosedChannelException e) {
				// Bug 219834: the file may have been closed by interrupting a thread during an I/O operation.
				reopen(e, ++retries);
			}
		} while (true);
	}

	void write(ByteBuffer buf, long position) throws IOException {
		int retries = 0;
		while (true) {
			try {
				fFile.getChannel().write(buf, position);
				return;
			} catch (ClosedChannelException e) {
				// Bug 219834: the file may have been closed by interrupting a thread during an I/O operation.
				reopen(e, ++retries);
			}
		}
	}

	private void reopen(ClosedChannelException e, int attempt) throws ClosedChannelException, FileNotFoundException {
		// We only try to reopen the file if the current thread was not interrupted.
		if (e instanceof ClosedByInterruptException || attempt >= 20) {
			throw e;
		}
		openFile();
	}

	public void transferTo(FileChannel target) throws IOException {
		assert fLocked;
		final FileChannel from = fFile.getChannel();
		long nRead = 0;
		long position = 0;
		long size = from.size();
		while (position < size) {
			nRead = from.transferTo(position, 4096 * 16, target);
			if (nRead == 0) {
				break; // Should not happen.
			} else {
				position += nRead;
			}
		}
	}

	public int getVersion() {
		return fVersion;
	}

	public void setVersion(int version) throws CoreException {
		assert fExclusiveLock;
		fHeaderChunk.putInt(VERSION_OFFSET, version);
		fVersion = version;
	}

	/**
	 * Empties the contents of the Database, making it ready to start again.
	 * @throws CoreException
	 */
	public void clear(int version) throws CoreException {
		assert fExclusiveLock;
		removeChunksFromCache();

		fVersion = version;
		// Clear the first chunk.
		fHeaderChunk.clear(0, CHUNK_SIZE);
		// Chunks have been removed from the cache, so we may just reset the array of chunks.
		fChunks = new Chunk[] { null };
		fChunksUsed = fChunksAllocated = fChunks.length;
		try {
			fHeaderChunk.flush(); // Zero out header chunk.
			fFile.getChannel().truncate(CHUNK_SIZE); // Truncate database.
		} catch (IOException e) {
			CCorePlugin.log(e);
		}
		malloced = freed = 0;
		/*
		 * This is for debugging purposes only, to simulate a very large PDOM database.
		 * It sets aside the specified number of chunks; nothing uses these chunks, so
		 * subsequent allocations come after the fillers. The special function
		 * createNewChunks allocates all of these chunks at once. For example, 524288
		 * set-aside chunks make the file start at 2G, and 8388608 at 32G.
		 */
		long setasideChunks = Long.getLong("org.eclipse.cdt.core.parser.pdom.dense.recptr.setaside.chunks", 0); //$NON-NLS-1$
		if (setasideChunks != 0) {
			setVersion(getVersion());
			createNewChunks((int) setasideChunks);
			flush();
		}
		// Clear the cache of strings used for btree searches.
		clearStringCache();
	}

	private void removeChunksFromCache() {
		synchronized (fCache) {
			for (int i = 1; i < fChunks.length; i++) {
				Chunk chunk = fChunks[i];
				if (chunk != null) {
					fCache.remove(chunk);
					fChunks[i] = null;
				}
			}
		}
	}

	/**
	 * Return the Chunk that contains the given offset.
	 * @throws CoreException
	 */
	public Chunk getChunk(long offset) throws CoreException {
		if (offset < CHUNK_SIZE) {
			return fHeaderChunk;
		}
		long long_index = offset / CHUNK_SIZE;
		assert long_index < Integer.MAX_VALUE;

		synchronized (fCache) {
			assert fLocked;
			final int index = (int) long_index;
			if (index < 0 || index >= fChunks.length) {
				databaseCorruptionDetected();
			}
			Chunk chunk = fChunks[index];
			if (chunk == null) {
				cacheMisses++;
				chunk = new Chunk(this, index);
				chunk.read();
				// Put the chunk in fChunks after it was read successfully.
				fChunks[index] = chunk;
			} else {
				cacheHits++;
			}
			fCache.add(chunk, fExclusiveLock);
			return chunk;
		}
	}
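	// Example of the mapping above: offset 5000 lives in chunk 5000 / CHUNK_SIZE = 1, at
	// position 5000 & OFFSET_IN_CHUNK_MASK = 904 within that chunk; offsets below CHUNK_SIZE
	// are served directly by the header chunk.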

	private void databaseCorruptionDetected() throws CoreException {
		String msg = MessageFormat.format(Messages.getString("Database.CorruptedDatabase"), //$NON-NLS-1$
				new Object[] { fLocation.getName() });
		throw new CoreException(new DBStatus(msg));
	}

	/**
	 * Allocate a block out of the database.
	 */
	public long malloc(final int datasize) throws CoreException {
		assert fExclusiveLock;
		assert datasize >= 0 && datasize <= MAX_MALLOC_SIZE;

		int needDeltas = (datasize + BLOCK_HEADER_SIZE + BLOCK_SIZE_DELTA - 1) / BLOCK_SIZE_DELTA;
		if (needDeltas < MIN_BLOCK_DELTAS) {
			needDeltas = MIN_BLOCK_DELTAS;
		}

		// Which block size.
		long freeblock = 0;
		int useDeltas;
		for (useDeltas = needDeltas; useDeltas <= MAX_BLOCK_DELTAS; useDeltas++) {
			freeblock = getFirstBlock(useDeltas * BLOCK_SIZE_DELTA);
			if (freeblock != 0)
				break;
		}

		// Get the block.
		Chunk chunk;
		if (freeblock == 0) {
			// Allocate a new chunk.
			freeblock = createNewChunk();
			useDeltas = MAX_BLOCK_DELTAS;
			chunk = getChunk(freeblock);
		} else {
			chunk = getChunk(freeblock);
			removeBlock(chunk, useDeltas * BLOCK_SIZE_DELTA, freeblock);
		}

		final int unusedDeltas = useDeltas - needDeltas;
		if (unusedDeltas >= MIN_BLOCK_DELTAS) {
			// Add in the unused part of our block.
			addBlock(chunk, unusedDeltas * BLOCK_SIZE_DELTA, freeblock + needDeltas * BLOCK_SIZE_DELTA);
			useDeltas = needDeltas;
		}

		// Make our size negative to show in use.
		final int usedSize = useDeltas * BLOCK_SIZE_DELTA;
		chunk.putShort(freeblock, (short) -usedSize);

		// Clear out the block, lots of people are expecting this.
		chunk.clear(freeblock + BLOCK_HEADER_SIZE, usedSize - BLOCK_HEADER_SIZE);

		malloced += usedSize;
		return freeblock + BLOCK_HEADER_SIZE;
	}
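	// Hypothetical usage sketch (the variable db stands for a Database instance and is not part
	// of this class): malloc() rounds a request up to whole deltas, so malloc(10) needs
	// (10 + 2 + 7) / 8 = 2 deltas, i.e. a 16-byte block, and returns the address 2 bytes past
	// the block start:
	//
	//   long record = db.malloc(10); // 16-byte block; 14 bytes of cleared payload
	//   db.putInt(record, 42);       // write into the payload
	//   db.free(record);             // return the block to the 16-byte free list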

	private long createNewChunk() throws CoreException {
		assert fExclusiveLock;
		synchronized (fCache) {
			final int newChunkIndex = fChunksUsed;

			final Chunk chunk = new Chunk(this, newChunkIndex);
			chunk.fDirty = true;

			if (newChunkIndex >= fChunksAllocated) {
				int increment = Math.max(1024, fChunksAllocated / 20);
				Chunk[] newchunks = new Chunk[fChunksAllocated + increment];
				System.arraycopy(fChunks, 0, newchunks, 0, fChunksAllocated);

				fChunks = newchunks;
				fChunksAllocated += increment;
			}
			fChunksUsed += 1;
			fChunks[newChunkIndex] = chunk;

			fCache.add(chunk, true);
			long address = (long) newChunkIndex * CHUNK_SIZE;

			/*
			 * Non-dense pointers are at most 31 bits; dense pointers are at most 35 bits. Check the
			 * size here and throw an exception if the address is too large. Throwing the CoreException
			 * with the special status stops the indexing operation. This is desired since, generally,
			 * once the maximum size is exceeded, errors pile up quickly.
			 */
			if (address >= MAX_DB_SIZE) {
				Object[] bindings = { this.getLocation().getAbsolutePath(), MAX_DB_SIZE };
				throw new CoreException(
						new Status(IStatus.ERROR, CCorePlugin.PLUGIN_ID, CCorePlugin.STATUS_PDOM_TOO_LARGE,
								NLS.bind(CCorePlugin.getResourceString("pdom.DatabaseTooLarge"), bindings), null)); //$NON-NLS-1$
			}
			return address;
		}
	}

	/**
	 * For testing purposes only.
	 */
	private long createNewChunks(int numChunks) throws CoreException {
		assert fExclusiveLock;
		synchronized (fCache) {
			final int oldLen = fChunks.length;
			Chunk[] newchunks = new Chunk[oldLen + numChunks];
			System.arraycopy(fChunks, 0, newchunks, 0, oldLen);
			for (int i = oldLen; i < oldLen + numChunks; i++) {
				newchunks[i] = null;
			}
			final Chunk chunk = new Chunk(this, oldLen + numChunks - 1);
			chunk.fDirty = true;
			newchunks[oldLen + numChunks - 1] = chunk;
			fChunks = newchunks;
			fCache.add(chunk, true);
			fChunksAllocated = oldLen + numChunks;
			fChunksUsed = oldLen + numChunks;
			return (long) (oldLen + numChunks - 1) * CHUNK_SIZE;
		}
	}

	private long getFirstBlock(int blocksize) throws CoreException {
		assert fLocked;
		return fHeaderChunk.getFreeRecPtr((blocksize / BLOCK_SIZE_DELTA - MIN_BLOCK_DELTAS + 1) * INT_SIZE);
	}

	private void setFirstBlock(int blocksize, long block) throws CoreException {
		assert fExclusiveLock;
		fHeaderChunk.putFreeRecPtr((blocksize / BLOCK_SIZE_DELTA - MIN_BLOCK_DELTAS + 1) * INT_SIZE, block);
	}

	private void removeBlock(Chunk chunk, int blocksize, long block) throws CoreException {
		assert fExclusiveLock;
		long prevblock = chunk.getFreeRecPtr(block + BLOCK_PREV_OFFSET);
		long nextblock = chunk.getFreeRecPtr(block + BLOCK_NEXT_OFFSET);
		if (prevblock != 0) {
			putFreeRecPtr(prevblock + BLOCK_NEXT_OFFSET, nextblock);
		} else { // We were the head.
			setFirstBlock(blocksize, nextblock);
		}

		if (nextblock != 0)
			putFreeRecPtr(nextblock + BLOCK_PREV_OFFSET, prevblock);
	}

	private void addBlock(Chunk chunk, int blocksize, long block) throws CoreException {
		assert fExclusiveLock;
		// Mark our size
		chunk.putShort(block, (short) blocksize);

		// Add us to the head of the list.
		long prevfirst = getFirstBlock(blocksize);
		chunk.putFreeRecPtr(block + BLOCK_PREV_OFFSET, 0);
		chunk.putFreeRecPtr(block + BLOCK_NEXT_OFFSET, prevfirst);
		if (prevfirst != 0)
			putFreeRecPtr(prevfirst + BLOCK_PREV_OFFSET, block);
		setFirstBlock(blocksize, block);
	}

	/**
	 * Free an allocated block.
	 *
	 * @param offset
	 */
	public void free(long offset) throws CoreException {
		assert fExclusiveLock;
		// TODO Look for opportunities to merge blocks
		long block = offset - BLOCK_HEADER_SIZE;
		Chunk chunk = getChunk(block);
		int blocksize = -chunk.getShort(block);
		if (blocksize < 0) {
			// Already freed.
			throw new CoreException(new Status(IStatus.ERROR, CCorePlugin.PLUGIN_ID, 0,
					"Already freed record " + offset, new Exception())); //$NON-NLS-1$
		}
		addBlock(chunk, blocksize, block);
		freed += blocksize;
		stringCache.remove(offset); // also remove record from string cache (if it exists)
	}

	public void putByte(long offset, byte value) throws CoreException {
		getChunk(offset).putByte(offset, value);
	}

	public byte getByte(long offset) throws CoreException {
		return getChunk(offset).getByte(offset);
	}

	public void putInt(long offset, int value) throws CoreException {
		getChunk(offset).putInt(offset, value);
	}

	public int getInt(long offset) throws CoreException {
		return getChunk(offset).getInt(offset);
	}

	public void putRecPtr(long offset, long value) throws CoreException {
		getChunk(offset).putRecPtr(offset, value);
	}

	public long getRecPtr(long offset) throws CoreException {
		return getChunk(offset).getRecPtr(offset);
	}

	private void putFreeRecPtr(long offset, long value) throws CoreException {
		getChunk(offset).putFreeRecPtr(offset, value);
	}

	private long getFreeRecPtr(long offset) throws CoreException {
		return getChunk(offset).getFreeRecPtr(offset);
	}

	public void put3ByteUnsignedInt(long offset, int value) throws CoreException {
		getChunk(offset).put3ByteUnsignedInt(offset, value);
	}

	public int get3ByteUnsignedInt(long offset) throws CoreException {
		return getChunk(offset).get3ByteUnsignedInt(offset);
	}

	public void putShort(long offset, short value) throws CoreException {
		getChunk(offset).putShort(offset, value);
	}

	public short getShort(long offset) throws CoreException {
		return getChunk(offset).getShort(offset);
	}

	public void putLong(long offset, long value) throws CoreException {
		getChunk(offset).putLong(offset, value);
	}

	public long getLong(long offset) throws CoreException {
		return getChunk(offset).getLong(offset);
	}

	public void putChar(long offset, char value) throws CoreException {
		getChunk(offset).putChar(offset, value);
	}

	public char getChar(long offset) throws CoreException {
		return getChunk(offset).getChar(offset);
	}

	public void clearBytes(long offset, int byteCount) throws CoreException {
		getChunk(offset).clear(offset, byteCount);
	}

	public void putBytes(long offset, byte[] data, int len) throws CoreException {
		getChunk(offset).put(offset, data, len);
	}

	public void putBytes(long offset, byte[] data, int dataPos, int len) throws CoreException {
		getChunk(offset).put(offset, data, dataPos, len);
	}

	public void getBytes(long offset, byte[] data) throws CoreException {
		getChunk(offset).get(offset, data);
	}

	public void getBytes(long offset, byte[] data, int dataPos, int len) throws CoreException {
		getChunk(offset).get(offset, data, dataPos, len);
	}

	public IString newString(String string) throws CoreException {
		return newString(string.toCharArray());
	}

	public IString newString(char[] chars) throws CoreException {
		int len = chars.length;
		int bytelen;
		final boolean useBytes = useBytes(chars);
		if (useBytes) {
			bytelen = len;
		} else {
			bytelen = 2 * len;
		}

		if (bytelen > ShortString.MAX_BYTE_LENGTH) {
			return addStringToCache(new LongString(this, chars, useBytes));
		} else {
			return addStringToCache(new ShortString(this, chars, useBytes));
		}
	}

	private boolean useBytes(char[] chars) {
		for (char c : chars) {
			if ((c & 0xff00) != 0)
				return false;
		}
		return true;
	}
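	// Example: "hello" contains only characters below 0x100, so useBytes() returns true and the
	// string can be stored one byte per character; a string containing the character U+03C0
	// (Greek small letter pi) is stored two bytes per character instead.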

	public IString getString(long offset) throws CoreException {
		final Reference<IString> cachedStringReference = stringCache.get(offset);
		if (cachedStringReference != null) {
			final IString cachedString = cachedStringReference.get();
			if (cachedString != null) {
				return cachedString; // string already cached, no need to re-retrieve it :-)
			}
		}
		final int l = getInt(offset);
		int bytelen = l < 0 ? -l : 2 * l;
		if (bytelen > ShortString.MAX_BYTE_LENGTH) {
			return addStringToCache(new LongString(this, offset));
		}
		return addStringToCache(new ShortString(this, offset));
	}

	private IString addStringToCache(IString string) {
		// Add the string to the cache.
		stringCache.put(string.getRecord(), new SoftStringRef(string, stringDisposal));
		// Evict entries whose strings have been garbage collected (stringDisposal is a final
		// field initialized inline, so a null check is unnecessary).
		Reference<? extends IString> disposedRef = stringDisposal.poll();
		while (disposedRef instanceof SoftStringRef) {
			stringCache.remove(((SoftStringRef) disposedRef).getRecord());
			disposedRef = stringDisposal.poll();
		}
		return string;
	}

	/**
	 * For debugging purposes only.
	 */
	public void reportFreeBlocks() throws CoreException {
		System.out.println("Allocated size: " + fChunksUsed * CHUNK_SIZE); //$NON-NLS-1$
		System.out.println("malloc'ed: " + malloced); //$NON-NLS-1$
		System.out.println("free'd: " + freed); //$NON-NLS-1$
		System.out.println("wasted: " + (fChunksUsed * CHUNK_SIZE - (malloced - freed))); //$NON-NLS-1$
		System.out.println("Free blocks"); //$NON-NLS-1$
		for (int bs = MIN_BLOCK_DELTAS * BLOCK_SIZE_DELTA; bs <= CHUNK_SIZE; bs += BLOCK_SIZE_DELTA) {
			int count = 0;
			long block = getFirstBlock(bs);
			while (block != 0) {
				++count;
				block = getFreeRecPtr(block + BLOCK_NEXT_OFFSET);
			}
			if (count != 0)
				System.out.println("Block size: " + bs + "=" + count); //$NON-NLS-1$ //$NON-NLS-2$
		}
	}

	/**
	 * Closes the database.
	 * <p>
	 * The behavior of any further calls to the Database is undefined.
	 * @throws CoreException
	 */
	public void close() throws CoreException {
		assert fExclusiveLock;
		flush();
		removeChunksFromCache();

		// Chunks have been removed from the cache, so we are fine.
		fHeaderChunk.clear(0, CHUNK_SIZE);
		fHeaderChunk.fDirty = false;
		fChunks = new Chunk[] { null };
		fChunksUsed = fChunksAllocated = fChunks.length;
		try {
			fFile.close();
		} catch (IOException e) {
			throw new CoreException(new DBStatus(e));
		}
		clearStringCache();
	}

	/**
	 * This method is public for testing purposes only.
	 */
	public File getLocation() {
		return fLocation;
	}

	/**
	 * Called from any thread via the cache, protected by {@link #fCache}.
	 */
	void releaseChunk(final Chunk chunk) {
		if (!chunk.fLocked) {
			fChunks[chunk.fSequenceNumber] = null;
		}
	}

	/**
	 * Returns the cache used for this database.
	 * @since 4.0
	 */
	public ChunkCache getChunkCache() {
		return fCache;
	}

	/**
	 * Asserts that the database is used by one thread exclusively. This is necessary when doing
	 * write operations.
	 */
	public void setExclusiveLock() {
		fExclusiveLock = true;
		fLocked = true;
	}

	public void setLocked(boolean val) {
		fLocked = val;
	}

	public void giveUpExclusiveLock(final boolean flush) throws CoreException {
		if (fExclusiveLock) {
			try {
				ArrayList<Chunk> dirtyChunks = new ArrayList<>();
				synchronized (fCache) {
					for (int i = 1; i < fChunksUsed; i++) {
						Chunk chunk = fChunks[i];
						if (chunk != null) {
							if (chunk.fCacheIndex < 0) {
								// Locked chunk that has been removed from cache.
								if (chunk.fDirty) {
									dirtyChunks.add(chunk); // Keep in fChunks until it is flushed.
								} else {
									chunk.fLocked = false;
									fChunks[i] = null;
								}
							} else if (chunk.fLocked) {
								// Locked chunk, still in cache.
								if (chunk.fDirty) {
									if (flush) {
										dirtyChunks.add(chunk);
									}
								} else {
									chunk.fLocked = false;
								}
							} else {
								assert !chunk.fDirty; // Dirty chunks must be locked.
							}
						}
					}
				}
				// Also handles header chunk.
				flushAndUnlockChunks(dirtyChunks, flush);
			} finally {
				fExclusiveLock = false;
			}
		}
	}

	public void flush() throws CoreException {
		assert fLocked;
		if (fExclusiveLock) {
			try {
				giveUpExclusiveLock(true);
			} finally {
				setExclusiveLock();
			}
			return;
		}

		// Be careful as other readers may access chunks concurrently.
		ArrayList<Chunk> dirtyChunks = new ArrayList<>();
		synchronized (fCache) {
			for (int i = 1; i < fChunksUsed; i++) {
				Chunk chunk = fChunks[i];
				if (chunk != null && chunk.fDirty) {
					dirtyChunks.add(chunk);
				}
			}
		}

		// Also handles header chunk.
		flushAndUnlockChunks(dirtyChunks, true);

		// And clear string cache
		clearStringCache();
	}

	private void clearStringCache() {
		stringCache.clear();
		// Drain the reference queue so no stale references linger.
		while (stringDisposal.poll() != null) {
		}
	}

	private void flushAndUnlockChunks(final ArrayList<Chunk> dirtyChunks, boolean isComplete) throws CoreException {
		assert !Thread.holdsLock(fCache);
		synchronized (fHeaderChunk) {
			final boolean haveDirtyChunks = !dirtyChunks.isEmpty();
			if (haveDirtyChunks || fHeaderChunk.fDirty) {
				markFileIncomplete();
			}
			if (haveDirtyChunks) {
				for (Chunk chunk : dirtyChunks) {
					if (chunk.fDirty) {
						chunk.flush();
					}
				}

				// Only after the chunks are flushed may we unlock and release them.
				synchronized (fCache) {
					for (Chunk chunk : dirtyChunks) {
						chunk.fLocked = false;
						if (chunk.fCacheIndex < 0) {
							fChunks[chunk.fSequenceNumber] = null;
						}
					}
				}
			}

			if (isComplete) {
				if (fHeaderChunk.fDirty || fIsMarkedIncomplete) {
					fHeaderChunk.putInt(VERSION_OFFSET, fVersion);
					fHeaderChunk.flush();
					fIsMarkedIncomplete = false;
				}
			}
		}
	}

	private void markFileIncomplete() throws CoreException {
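		// Zeroing the four bytes at file offset 0 clobbers the version field (VERSION_OFFSET == 0).
		// Until flushAndUnlockChunks() rewrites the real version, the file identifies itself as
		// incomplete; presumably no valid database carries version 0.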
		if (!fIsMarkedIncomplete) {
			fIsMarkedIncomplete = true;
			try {
				final ByteBuffer buf = ByteBuffer.wrap(new byte[4]);
				fFile.getChannel().write(buf, 0);
			} catch (IOException e) {
				throw new CoreException(new DBStatus(e));
			}
		}
	}

	public void resetCacheCounters() {
		cacheHits = cacheMisses = 0;
	}

	public long getCacheHits() {
		return cacheHits;
	}

	public long getCacheMisses() {
		return cacheMisses;
	}

	public long getSizeBytes() {
		try {
			return fFile.length();
		} catch (IOException e) {
			// Ignore; report zero if the file size cannot be determined.
		}
		return 0;
	}

	/**
	 * A record pointer is a pointer as returned by Database.malloc().
	 * It points BLOCK_HEADER_SIZE bytes past the start of its block.
	 */
	public static void putRecPtr(final long value, byte[] buffer, int idx) {
		final int denseValue = value == 0 ? 0 : Chunk.compressFreeRecPtr(value - BLOCK_HEADER_SIZE);
		Chunk.putInt(denseValue, buffer, idx);
	}

	/**
	 * A record pointer is a pointer as returned by Database.malloc().
	 * It points BLOCK_HEADER_SIZE bytes past the start of its block.
	 */
	public static long getRecPtr(byte[] buffer, final int idx) {
		int value = Chunk.getInt(buffer, idx);
		long address = Chunk.expandToFreeRecPtr(value);
		return address != 0 ? (address + BLOCK_HEADER_SIZE) : address;
	}
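
	// Round-trip note for the two helpers above: a nonzero record pointer (always
	// BLOCK_HEADER_SIZE past an 8-byte-aligned block start) is stored compressed via
	// Chunk.compressFreeRecPtr(value - BLOCK_HEADER_SIZE) and expanded back symmetrically;
	// the value 0 is reserved as a null pointer in both directions.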
}
