indexFiles = Collections.emptyMap();
+
+ /**
+ * Not instantiable by clients.
+ */
+ private IndexPersistenceManager() {
+ super();
+ }
+
+ /**
+ * Initializes the persistence manager with the previous Eclipse session's
+ * saved state.
+ *
+ * @param state
+ * the previous session's state, or {@code null} if none
+ * (for example, if this is the first run)
+ *
+ * @throws CoreException
+ * on failure to initialize the index persistence manager
+ */
+ public void initialize(ISavedState state) throws CoreException {
+ indexFiles = Collections.unmodifiableMap(
+ Stream.of(state.getFiles())
+ .collect(Collectors.toMap(IPath::toString, state::lookup)));
+ }
+
+ /**
+ * Registers a persistent model index.
+ *
+ * @param index
+ * the index to register
+ * @param saveParticipant
+ * its workspace-save delegate
+ *
+ * @return an input stream providing the previous session's index data, or {@code null}
+ * if none is available, in which case presumably a full indexing is required.
+ * The caller is required to {@link InputStream#close() close} this stream
+ */
+ public InputStream addIndex(WorkspaceModelIndex> index, IIndexSaveParticipant saveParticipant) {
+ ZipInputStream result = null;
+
+ workspaceIndices.put(index, saveParticipant);
+
+ IPath indexFile = indexFiles.get(index.getName());
+ File storeFile = (indexFile != null) ? getStoreFile(indexFile) : null;
+ if (storeFile != null) {
+ if (storeFile.exists()) {
+ try {
+ result = new ZipInputStream(new FileInputStream(storeFile));
+
+ // Get the Contents entry
+ result.getNextEntry();
+ } catch (Exception e) {
+ Activator.log.error("Failed to open index file for " + index.getName(), e); //$NON-NLS-1$
+ }
+ }
+ }
+
+ return result;
+ }
+
+ /**
+ * Removes an index from the persistence manager.
+ *
+ * @param index
+ * the index to remove
+ */
+ public void removeIndex(WorkspaceModelIndex> index) {
+ workspaceIndices.remove(index);
+ }
+
+ private IPath getIndexLocation() {
+ return Activator.getDefault().getStateLocation().append(INDEX_DIR);
+ }
+
+ private File getStoreFile(IPath storePath) {
+ return Activator.getDefault().getStateLocation().append(storePath).toFile();
+ }
+
+ private IPath getStorePath(WorkspaceModelIndex> index, int saveNumber) {
+ return INDEX_DIR.append(index.getName()).addFileExtension(String.valueOf(saveNumber));
+ }
+
+ private IPath getStoreLocation(WorkspaceModelIndex> index, int saveNumber) {
+ return Activator.getDefault().getStateLocation().append(getStorePath(index, saveNumber));
+ }
+
+ /**
+ * Obtains a workspace save participant to which the bundle's main participant
+ * delegates the index portion of workspace save.
+ *
+ * Note that this delegate must never tell the {@link ISaveContext} that
+ * it needs a {@linkplain ISaveContext#needSaveNumber() save number} or a
+ * {@linkplain ISaveContext#needDelta() delta} as that is the responsibility
+ * of the bundle's save participant. Also, it is only ever invoked on a
+ * full workspace save.
+ *
+ *
+ * @return the workspace save participant delegate
+ */
+ public ISaveParticipant getSaveParticipant() {
+ return new ISaveParticipant() {
+
+ private Map newIndexFiles;
+
+ @Override
+ public void prepareToSave(ISaveContext context) throws CoreException {
+ // Ensure that our state location index directory exists
+ File indexDirectory = getIndexLocation().toFile();
+ if (!indexDirectory.exists()) {
+ indexDirectory.mkdir();
+ }
+ }
+
+ @Override
+ public void saving(ISaveContext context) throws CoreException {
+ // Save our indices
+ for (Map.Entry, IIndexSaveParticipant> next : workspaceIndices.entrySet()) {
+ WorkspaceModelIndex> index = next.getKey();
+ IIndexSaveParticipant save = next.getValue();
+
+ if (save != null) {
+ File storeFile = getStoreLocation(index, context.getSaveNumber()).toFile();
+
+ try (OutputStream store = createStoreOutput(storeFile)) {
+ save.save(index, store);
+ } catch (IOException e) {
+ storeFile.delete(); // In case there's something there, it can't be trusted
+ throw new CoreException(new Status(IStatus.ERROR, Activator.PLUGIN_ID,
+ "Failed to save index " + index.getName(), e)); //$NON-NLS-1$
+ }
+ }
+ }
+
+ // Compute the new index file mappings
+ newIndexFiles = workspaceIndices.keySet().stream()
+ .collect(Collectors.toMap(
+ WorkspaceModelIndex::getName,
+ index -> getStorePath(index, context.getSaveNumber())));
+
+ // Remove old index mappings
+ for (String next : indexFiles.keySet()) {
+ context.map(new Path(next), null);
+ }
+
+ // Add new index mappings
+ for (Map.Entry next : newIndexFiles.entrySet()) {
+ context.map(new Path(next.getKey()), next.getValue());
+ }
+ }
+
+ private OutputStream createStoreOutput(File storeFile) throws IOException {
+ ZipOutputStream result = new ZipOutputStream(new FileOutputStream(storeFile));
+ ZipEntry entry = new ZipEntry(ZIP_ENTRY);
+ result.putNextEntry(entry);
+ return result;
+ }
+
+ @Override
+ public void doneSaving(ISaveContext context) {
+ // Delete the old index files
+ try {
+ indexFiles.values().forEach(p -> getStoreFile(p).delete());
+ } catch (Exception e) {
+ // This doesn't stop us proceeding
+ Activator.log.error("Failed to clean up old index files", e); //$NON-NLS-1$
+ }
+
+ // Grab our new index files
+ indexFiles = newIndexFiles;
+ newIndexFiles = null;
+ }
+
+ @Override
+ public void rollback(ISaveContext context) {
+ try {
+ if (newIndexFiles != null) {
+ // Delete the new save files and mappings that we created
+ newIndexFiles.values().stream()
+ .map(IndexPersistenceManager.this::getStoreFile)
+ .forEach(File::delete);
+
+ // And the mappings
+ newIndexFiles.keySet().stream()
+ .map(Path::new)
+ .forEach(p -> context.map(p, null));
+
+ newIndexFiles = null;
+
+ // Then restore the old mappings
+ indexFiles.forEach((name, location) -> context.map(new Path(name), location));
+ }
+ } catch (Exception e) {
+ Activator.log.error("Failed to roll back model indices.", e); //$NON-NLS-1$
+ }
+
+ }
+ };
+ }
+
+}
diff --git a/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/internal/resource/index/InternalModelIndex.java b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/internal/resource/index/InternalModelIndex.java
new file mode 100644
index 00000000000..739f84e2135
--- /dev/null
+++ b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/internal/resource/index/InternalModelIndex.java
@@ -0,0 +1,118 @@
+/*****************************************************************************
+ * Copyright (c) 2016 Christian W. Damus and others.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Christian W. Damus - Initial API and implementation
+ *
+ *****************************************************************************/
+
+package org.eclipse.papyrus.infra.emf.internal.resource.index;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectStreamClass;
+import java.util.concurrent.Callable;
+
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.QualifiedName;
+import org.eclipse.core.runtime.content.IContentType;
+import org.eclipse.papyrus.infra.emf.resource.index.WorkspaceModelIndex;
+
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Internal implementation details of a {@link WorkspaceModelIndex}.
+ */
+public abstract class InternalModelIndex {
+
+ private final QualifiedName indexKey;
+ private final int maxIndexJobs;
+
+ /** My manager. */
+ private IndexManager manager;
+
+ /** A class loader that knows the classes of the owner (bundle) context. */
+ private ClassLoader ownerClassLoader;
+
+ /**
+ * Initializes me.
+ */
+ public InternalModelIndex(QualifiedName indexKey, int maxIndexJobs) {
+ super();
+
+ this.indexKey = indexKey;
+ this.maxIndexJobs = maxIndexJobs;
+ }
+
+ /**
+ * Initializes me.
+ */
+ public InternalModelIndex(QualifiedName indexKey) {
+ this(indexKey, 0);
+ }
+
+ public final QualifiedName getIndexKey() {
+ return indexKey;
+ }
+
+ public final int getMaxIndexJobs() {
+ return maxIndexJobs;
+ }
+
+ protected final IContentType[] getContentTypes(IFile file) {
+ return manager.getContentTypes(file);
+ }
+
+ /**
+ * Obtains an asynchronous future result that is scheduled to run after
+ * any pending indexing work has completed.
+ *
+ * @param callable
+ * the operation to schedule
+ *
+ * @return the future result of the operation
+ */
+ protected ListenableFuture afterIndex(final Callable callable) {
+ return manager.afterIndex(this, callable);
+ }
+
+ void setOwnerClassLoader(ClassLoader ownerClassLoader) {
+ this.ownerClassLoader = ownerClassLoader;
+ }
+
+ protected final ObjectInputStream createObjectInput(InputStream underlying) throws IOException {
+ return (ownerClassLoader == null)
+ ? new ObjectInputStream(underlying)
+ : new ObjectInputStream(underlying) {
+ @Override
+ protected Class> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
+ return Class.forName(desc.getName(), true, ownerClassLoader);
+ }
+ };
+ }
+
+ protected abstract void dispose();
+
+ void start(IndexManager manager) {
+ this.manager = manager;
+ start();
+ }
+
+ protected abstract void start();
+
+ protected abstract boolean match(IFile file);
+
+ protected abstract void process(IFile file) throws CoreException;
+
+ protected abstract void remove(IProject project, IFile file) throws CoreException;
+
+ protected abstract void remove(IProject project) throws CoreException;
+}
diff --git a/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/ICrossReferenceIndex.java b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/ICrossReferenceIndex.java
new file mode 100644
index 00000000000..920eab7628f
--- /dev/null
+++ b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/ICrossReferenceIndex.java
@@ -0,0 +1,274 @@
+/*****************************************************************************
+ * Copyright (c) 2016 Christian W. Damus and others.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Christian W. Damus - Initial API and implementation
+ *
+ *****************************************************************************/
+
+package org.eclipse.papyrus.infra.emf.resource;
+
+import static org.eclipse.papyrus.infra.emf.internal.resource.InternalIndexUtil.getSemanticModelFileExtensions;
+
+import java.util.Set;
+
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.emf.common.util.URI;
+import org.eclipse.emf.ecore.plugin.EcorePlugin;
+import org.eclipse.emf.ecore.resource.ResourceSet;
+import org.eclipse.papyrus.infra.emf.internal.resource.CrossReferenceIndex;
+import org.eclipse.papyrus.infra.emf.internal.resource.OnDemandCrossReferenceIndex;
+
+import com.google.common.collect.SetMultimap;
+import com.google.common.util.concurrent.ListenableFuture;
+
+
+/**
+ * API for an index of cross-resource proxy references in the workspace, especially
+ * containment proxies of the "shard" variety: controlled units that are not openable
+ * in their own editors but must be opened from the root resource of the controlled unit
+ * graph.
+ *
+ * @since 2.1
+ */
+public interface ICrossReferenceIndex {
+
+ /**
+ * Obtains the cross-reference index for the given resource set.
+ *
+ * @param resourceSet
+ * a resource-set in which resources are managed on which
+ * cross-reference queries are to be applied, or {@code null}
+ * if there is no contextual resource set, in which case
+ * the default heuristic- or otherwise-determined kinds of
+ * resources will be indexed
+ */
+ static ICrossReferenceIndex getInstance(ResourceSet resourceSet) {
+ ICrossReferenceIndex result;
+
+ if (!EcorePlugin.IS_ECLIPSE_RUNNING || Job.getJobManager().isSuspended()) {
+ // We cannot rely on jobs and the workspace to calculate the index
+ // in the background
+ result = new OnDemandCrossReferenceIndex(getSemanticModelFileExtensions(resourceSet));
+ } else {
+ result = CrossReferenceIndex.getInstance();
+ }
+
+ return result;
+ }
+
+ /**
+ * Asynchronously queries the mapping of URIs of resources to URIs of others
+ * that they cross-reference to.
+ *
+ * @return a future result of the mapping of resource URIs to cross-referenced URIs
+ */
+ ListenableFuture> getOutgoingCrossReferencesAsync();
+
+ /**
+ * Queries the mapping of URIs of resources to URIs of others
+ * that they cross-reference to.
+ *
+ * @return the mapping of resource URIs to cross-referenced URIs URIs
+ *
+ * @throws CoreException
+ * if the index either fails to compute the cross-references or if
+ * the calling thread is interrupted in waiting for the result
+ */
+ SetMultimap getOutgoingCrossReferences() throws CoreException;
+
+ /**
+ * Asynchronously queries the URIs of other resources that a given resource
+ * cross-references to.
+ *
+ * @param resourceURI
+ * the URI of a resource
+ * @return a future result of the resource URIs that it cross-references to
+ */
+ ListenableFuture> getOutgoingCrossReferencesAsync(URI resourceURI);
+
+ /**
+ * Queries the URIs of other resources that a given resource
+ * cross-references to.
+ *
+ * @param resourceURI
+ * the URI of a resource
+ * @return the resource URIs that it cross-references to
+ *
+ * @throws CoreException
+ * if the index either fails to compute the cross-references or if
+ * the calling thread is interrupted in waiting for the result
+ */
+ Set getOutgoingCrossReferences(URI resourceURI) throws CoreException;
+
+ /**
+ * Asynchronously queries the mapping of URIs of resources to URIs of others
+ * from which they are cross-referenced.
+ *
+ * @return a future result of the mapping of resource URIs to cross-referencing URIs
+ */
+ ListenableFuture> getIncomingCrossReferencesAsync();
+
+ /**
+ * Queries the mapping of URIs of resources to URIs of others
+ * from which they are cross-referenced.
+ *
+ * @return the mapping of resource URIs to cross-referencing URIs
+ *
+ * @throws CoreException
+ * if the index either fails to compute the cross-references or if
+ * the calling thread is interrupted in waiting for the result
+ */
+ SetMultimap getIncomingCrossReferences() throws CoreException;
+
+ /**
+ * Asynchronously queries the URIs of other resources that cross-reference to
+ * a given resource.
+ *
+ * @param resourceURI
+ * the URI of a resource
+ * @return a future result of the resource URIs that cross-reference to it
+ */
+ ListenableFuture> getIncomingCrossReferencesAsync(URI resourceURI);
+
+ /**
+ * Queries the URIs of other resources that cross-reference to
+ * a given resource.
+ *
+ * @param resourceURI
+ * the URI of a resource
+ * @return the resource URIs that cross-reference to it
+ *
+ * @throws CoreException
+ * if the index either fails to compute the cross-references or if
+ * the calling thread is interrupted in waiting for the result
+ */
+ Set getIncomingCrossReferences(URI resourceURI) throws CoreException;
+
+ /**
+ * Asynchronously queries whether a resource is a "shard".
+ *
+ * @param resourceURI
+ * the URI of a resource
+ * @return a future result of whether the resource is a "shard"
+ */
+ ListenableFuture isShardAsync(URI resourceURI);
+
+ /**
+ * Queries whether a resource is a "shard".
+ *
+ * @param resourceURI
+ * the URI of a resource
+ * @return whether the resource is a "shard"
+ *
+ * @throws CoreException
+ * if the index either fails to compute the shard-ness or if
+ * the calling thread is interrupted in waiting for the result
+ */
+ boolean isShard(URI resourceURI) throws CoreException;
+
+ /**
+ * Asynchronously queries the mapping of URIs of resources to URIs of shards that are their immediate
+ * children.
+ *
+ * @return a future result of the mapping of resource URIs to shard URIs
+ */
+ ListenableFuture> getShardsAsync();
+
+ /**
+ * Queries the mapping of URIs of resources to URIs of shards that are their immediate
+ * children.
+ *
+ * @return the mapping of resource URIs to shard URIs
+ *
+ * @throws CoreException
+ * if the index either fails to compute the shards or if
+ * the calling thread is interrupted in waiting for the result
+ */
+ SetMultimap getShards() throws CoreException;
+
+ /**
+ * Asynchronously queries the URIs of resources that are immediate shards of a
+ * given resource.
+ *
+ * @param resourceURI
+ * the URI of a resource
+ * @return a future result of the URIs of shards that are its immediate children
+ */
+ ListenableFuture> getShardsAsync(URI resourceURI);
+
+ /**
+ * Queries the URIs of resources that are immediate shards of a
+ * given resource.
+ *
+ * @param resourceURI
+ * the URI of a resource
+ * @return the URIs of shards that are its immediate children
+ *
+ * @throws CoreException
+ * if the index either fails to compute the shards or if
+ * the calling thread is interrupted in waiting for the result
+ */
+ Set getShards(URI resourceURI) throws CoreException;
+
+ /**
+ * Asynchronously queries URIs of resources that are immediate parents of a given
+ * (potential) shard resource.
+ *
+ * @param shardURI
+ * the URI of a potential shard resource. It needs not necessarily actually
+ * be a shard, in which case it trivially wouldn't have any parents
+ * @return the future result of the URIs of resources that are immediate parents of
+ * the shard
+ */
+ ListenableFuture> getParentsAsync(URI shardURI);
+
+ /**
+ * Queries URIs of resources that are immediate parents of a given
+ * (potential) shard resource.
+ *
+ * @param shardURI
+ * the URI of a potential shard resource. It needs not necessarily actually
+ * be a shard, in which case it trivially wouldn't have any parents
+ * @return the URIs of resources that are immediate parents of
+ * the shard
+ *
+ * @throws CoreException
+ * if the index either fails to compute the parents or if
+ * the calling thread is interrupted in waiting for the result
+ */
+ Set getParents(URI shardURI) throws CoreException;
+
+ /**
+ * Asynchronously queries URIs of resources that are roots (ultimate parents) of a given
+ * (potential) shard resource.
+ *
+ * @param shardURI
+ * the URI of a potential shard resource. It needs not necessarily actually
+ * be a shard, in which case it trivially wouldn't have any parents
+ * @return the future result of the URIs of resources that are roots of its parent graph
+ */
+ ListenableFuture> getRootsAsync(URI shardURI);
+
+ /**
+ * Queries URIs of resources that are roots (ultimate parents) of a given
+ * (potential) shard resource.
+ *
+ * @param shardURI
+ * the URI of a potential shard resource. It needs not necessarily actually
+ * be a shard, in which case it trivially wouldn't have any parents
+ * @return the URIs of resources that are roots of its parent graph
+ *
+ * @throws CoreException
+ * if the index either fails to compute the roots or if
+ * the calling thread is interrupted in waiting for the result
+ */
+ Set getRoots(URI shardURI) throws CoreException;
+
+}
diff --git a/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/ShardResourceHelper.java b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/ShardResourceHelper.java
new file mode 100644
index 00000000000..29c004eb5c6
--- /dev/null
+++ b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/ShardResourceHelper.java
@@ -0,0 +1,418 @@
+/*****************************************************************************
+ * Copyright (c) 2016 Christian W. Damus and others.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Christian W. Damus - Initial API and implementation
+ *
+ *****************************************************************************/
+
+package org.eclipse.papyrus.infra.emf.resource;
+
+import static org.eclipse.papyrus.infra.emf.internal.resource.AbstractCrossReferenceIndex.SHARD_ANNOTATION_SOURCE;
+
+import java.util.Collection;
+import java.util.List;
+
+import org.eclipse.emf.common.command.Command;
+import org.eclipse.emf.common.command.CommandWrapper;
+import org.eclipse.emf.common.command.IdentityCommand;
+import org.eclipse.emf.common.notify.Adapter;
+import org.eclipse.emf.common.notify.Notification;
+import org.eclipse.emf.common.notify.Notifier;
+import org.eclipse.emf.common.notify.impl.AdapterImpl;
+import org.eclipse.emf.ecore.EAnnotation;
+import org.eclipse.emf.ecore.EModelElement;
+import org.eclipse.emf.ecore.EObject;
+import org.eclipse.emf.ecore.EcoreFactory;
+import org.eclipse.emf.ecore.EcorePackage;
+import org.eclipse.emf.ecore.InternalEObject;
+import org.eclipse.emf.ecore.resource.Resource;
+import org.eclipse.emf.ecore.util.EcoreUtil;
+import org.eclipse.emf.edit.command.AddCommand;
+import org.eclipse.emf.edit.command.RemoveCommand;
+import org.eclipse.emf.edit.domain.EditingDomain;
+import org.eclipse.papyrus.infra.emf.utils.EMFHelper;
+import org.eclipse.papyrus.infra.tools.util.TypeUtils;
+
+/**
+ * A convenience wrapper for {@link EObject}s and/or {@link Resource}s that
+ * are dependent "shard" units of a Papyrus model. A shard helper must
+ * always be {@linkplain #close() closed} after it is no longer needed,
+ * because it attaches adapters to the model.
+ *
+ * @since 2.1
+ */
+public class ShardResourceHelper implements AutoCloseable {
+
+ private final Resource resource;
+ private final EObject object;
+
+ private boolean closed;
+ private boolean initialized;
+
+ private EAnnotation annotation;
+ private Adapter annotationAdapter;
+
+ /**
+ * Initializes me on a shard {@code resource} that is expected to contain
+ * only one root element (it doesn't store multiple distinct sub-trees
+ * of the model).
+ *
+ * @param resource
+ * a "resource" resource
+ *
+ * @see #ShardResourceHelper(EObject)
+ */
+ public ShardResourceHelper(Resource resource) {
+ this(resource, null);
+ }
+
+ /**
+ * Initializes me on an {@code element} in a shard resource that uniquely
+ * identifies a sub-tree of potentially more than one stored in the resource.
+ * If there is any possibility that a resource stores multiple sub-trees,
+ * prefer this constructor over {@linkplain #ShardResourceHelper(Resource) the other}.
+ *
+ * @param element
+ * an element in a "resource" resource
+ */
+ public ShardResourceHelper(EObject element) {
+ this(element.eResource(), element);
+ }
+
+ private ShardResourceHelper(Resource resource, EObject object) {
+ super();
+
+ this.resource = resource;
+ this.object = object;
+ }
+
+ /**
+ * Is my resource a shard?
+ *
+ * @return whether my resource is a shard of its parent
+ */
+ public boolean isShard() {
+ return getAnnotation() != null;
+ }
+
+ /**
+ * Changes my resource from a shard to an independent controlled unit, or vice-versa.
+ * In the context of an editor and/or editing-domain, it is usually more appropriate
+ * to use the {@link #getSetShardCommand(boolean)} API for manipulation by command.
+ *
+ * @param isShard
+ * whether my resource should be a shard. If it already matches
+ * this state, then do nothing
+ *
+ * @see #getSetShardCommand(boolean)
+ */
+ public void setShard(boolean isShard) {
+ checkClosed();
+
+ if (isShard != isShard()) {
+ if (getAnnotation() != null) {
+ // We are un-sharding
+ EcoreUtil.remove(getAnnotation());
+ } else {
+ // We are sharding
+ EAnnotation annotation = EcoreFactory.eINSTANCE.createEAnnotation();
+ annotation.setSource(SHARD_ANNOTATION_SOURCE);
+ Notifier annotationOwner;
+
+ EObject shardElement = getShardElement();
+ if (shardElement instanceof EModelElement) {
+ // Add it to the shard element
+ ((EModelElement) shardElement).getEAnnotations().add(annotation);
+ annotationOwner = shardElement;
+ } else if (shardElement != null) {
+ // Add it after the shard element
+ int index = resource.getContents().indexOf(shardElement) + 1;
+ resource.getContents().add(index, annotation);
+ annotationOwner = resource;
+ } else {
+ // Try to add it after the principal model object
+ resource.getContents().add(Math.min(1, resource.getContents().size()), annotation);
+ annotationOwner = resource;
+ }
+
+ // In any case, the parent is the resource storing the element's container
+ if ((shardElement != null) && (shardElement.eContainer() != null)) {
+ annotation.getReferences().add(shardElement.eContainer());
+ }
+
+ setAnnotation(annotation);
+ attachAnnotationAdapter(annotationOwner);
+ }
+ }
+ }
+
+ /**
+ * Finds the element that is the root of the particular sub-tree stored in
+ * this resource, from the context provided by the client.
+ *
+ * @return the shard root element as best determined from the context, or
+ * {@code null} in the worst case that the resource is empty
+ */
+ private EObject getShardElement() {
+ checkClosed();
+
+ EObject result = null;
+
+ if (object != null) {
+ // Find the object in its content tree that is a root of our resource
+ for (result = object; result != null; result = result.eContainer()) {
+ InternalEObject internal = (InternalEObject) result;
+ if (internal.eDirectResource() == resource) {
+ // Found it
+ break;
+ }
+ }
+ }
+
+ if ((result == null) && !resource.getContents().isEmpty()) {
+ // Just take the first element as the shard element
+ result = resource.getContents().get(0);
+ }
+
+ return result;
+ }
+
+ /**
+ * Obtains a command to change my resource from a shard to an independent
+ * controlled unit, or vice-versa.
+ *
+ * @param isShard
+ * whether my resource should be a shard. If it already matches
+ * this state, then the resulting command will have no effect
+ *
+ * @return the set-shard command
+ *
+ * @see #setShard(boolean)
+ */
+ public Command getSetShardCommand(boolean isShard) {
+ Command result;
+
+ if (isShard() == isShard) {
+ result = IdentityCommand.INSTANCE;
+ } else if (getAnnotation() != null) {
+ // Delete the annotation
+ EAnnotation annotation = getAnnotation();
+ if (annotation.getEModelElement() != null) {
+ result = RemoveCommand.create(EMFHelper.resolveEditingDomain(annotation),
+ annotation.getEModelElement(),
+ EcorePackage.Literals.EMODEL_ELEMENT__EANNOTATIONS,
+ annotation);
+ } else {
+ result = new RemoveCommand(EMFHelper.resolveEditingDomain(resource),
+ resource.getContents(),
+ annotation);
+ }
+ } else {
+ // Create the annotation
+ EAnnotation annotation = EcoreFactory.eINSTANCE.createEAnnotation();
+ annotation.setSource(SHARD_ANNOTATION_SOURCE);
+
+ EditingDomain domain;
+ EObject shardElement = getShardElement();
+ Notifier annotationOwner;
+
+ if (shardElement instanceof EModelElement) {
+ // Add it to the shard element
+ domain = EMFHelper.resolveEditingDomain(shardElement);
+ result = AddCommand.create(domain, shardElement,
+ EcorePackage.Literals.EMODEL_ELEMENT__EANNOTATIONS,
+ annotation);
+ annotationOwner = shardElement;
+ } else if (shardElement != null) {
+ // Add it after the shard element
+ int index = resource.getContents().indexOf(shardElement) + 1;
+ domain = EMFHelper.resolveEditingDomain(shardElement);
+ result = new AddCommand(domain, resource.getContents(), annotation, index);
+ annotationOwner = resource;
+ } else {
+ // Try to add it after the principal model object
+ domain = EMFHelper.resolveEditingDomain(resource);
+ int index = Math.min(1, resource.getContents().size());
+ result = new AddCommand(domain, resource.getContents(), annotation, index);
+ annotationOwner = resource;
+ }
+
+ // In any case, the parent is the resource storing the element's container
+ if ((shardElement != null) && (shardElement.eContainer() != null)) {
+ result = result.chain(AddCommand.create(domain, annotation,
+ EcorePackage.Literals.EANNOTATION__REFERENCES,
+ shardElement.eContainer()));
+ }
+
+ // Ensure attachment of the adapter on first execution and record the
+ // annotation, if not already closed
+ result = new CommandWrapper(result) {
+ @Override
+ public void execute() {
+ super.execute();
+
+ if (!ShardResourceHelper.this.isClosed()) {
+ setAnnotation(annotation);
+ attachAnnotationAdapter(annotationOwner);
+ }
+ }
+ };
+ }
+
+ return result;
+ }
+
+ /**
+ * Closes me, ensuring at least that any adapter I have attached to the model
+ * that retains me is detached. Once I have been closed, I cannot be used
+ * any longer.
+ */
+ @Override
+ public void close() {
+ closed = true;
+
+ doClose();
+ }
+
+ protected void doClose() {
+ clearAnnotation();
+ detachAnnotationAdapter();
+ }
+
+ /**
+ * Queries whether I have been {@linkplain #close() closed}.
+ *
+ * @return whether I have been closed
+ */
+ public final boolean isClosed() {
+ return closed;
+ }
+
+ protected final void checkClosed() {
+ if (isClosed()) {
+ throw new IllegalStateException("closed"); //$NON-NLS-1$
+ }
+ }
+
+ private EAnnotation getAnnotation() {
+ checkClosed();
+
+ if (!initialized) {
+ setAnnotation(findAnnotation());
+ initialized = true;
+ }
+
+ return annotation;
+ }
+
+ private EAnnotation findAnnotation() {
+ EAnnotation result = null;
+
+ if (!resource.getContents().isEmpty()) {
+ EObject shardElement = getShardElement();
+ Notifier annotationOwner;
+
+ if (shardElement instanceof EModelElement) {
+ result = ((EModelElement) shardElement).getEAnnotation(SHARD_ANNOTATION_SOURCE);
+ annotationOwner = shardElement;
+ } else {
+ // Maybe it's just in the resource?
+ List contents = resource.getContents();
+ annotationOwner = resource;
+
+ if (shardElement != null) {
+ int index = contents.indexOf(shardElement) + 1;
+ if (index < contents.size()) {
+ EAnnotation maybe = TypeUtils.as(contents.get(index), EAnnotation.class);
+ if ((maybe != null) && SHARD_ANNOTATION_SOURCE.equals(maybe.getSource())) {
+ // That's it
+ result = maybe;
+ }
+ }
+ }
+
+ if ((result == null) && (object == null)) {
+ // If we don't have a specific sub-tree in mind, look for any
+ // shard annotation
+ result = contents.stream()
+ .filter(EAnnotation.class::isInstance).map(EAnnotation.class::cast)
+ .filter(a -> SHARD_ANNOTATION_SOURCE.equals(a.getSource()))
+ .findFirst().orElse(null);
+ }
+ }
+
+ if (result != null) {
+ attachAnnotationAdapter(annotationOwner);
+ }
+ }
+
+ return result;
+ }
+
+ private void clearAnnotation() {
+ initialized = false;
+ setAnnotation(null);
+ }
+
+ private void setAnnotation(EAnnotation annotation) {
+ this.annotation = annotation;
+ }
+
+ private void attachAnnotationAdapter(Notifier annotationOwner) {
+ // If we still have the annotation, then it's still attached
+ if (annotationAdapter == null) {
+ annotationAdapter = new AdapterImpl() {
+ @Override
+ public void notifyChanged(Notification msg) {
+ if (msg.getEventType() == Notification.REMOVING_ADAPTER) {
+ // My target was unloaded
+ clearAnnotation();
+ } else if ((msg.getFeature() == EcorePackage.Literals.EMODEL_ELEMENT__EANNOTATIONS)
+ || ((msg.getNotifier() == resource) && (msg.getFeatureID(Resource.class) == Resource.RESOURCE__CONTENTS))) {
+
+ // Annotation of the model element or resource changed
+ boolean clear = false;
+
+ switch (msg.getEventType()) {
+ case Notification.SET:
+ case Notification.UNSET:
+ case Notification.REMOVE:
+ clear = (msg.getOldValue() == getAnnotation());
+ break;
+ case Notification.ADD:
+ case Notification.ADD_MANY:
+ // If we don't have an annotation, we'll try to find it
+ clear = getAnnotation() == null;
+ break;
+ case Notification.REMOVE_MANY:
+ clear = ((Collection>) msg.getOldValue()).contains(getAnnotation());
+ break;
+ }
+
+ if (clear) {
+ // In case the annotation moved or was replaced,
+ // we'll compute it again on-the-fly
+ clearAnnotation();
+ }
+ }
+ }
+ };
+
+ annotationOwner.eAdapters().add(annotationAdapter);
+ }
+ }
+
+ private void detachAnnotationAdapter() {
+ if (annotationAdapter != null) {
+ Adapter adapter = annotationAdapter;
+ annotationAdapter = null;
+ adapter.getTarget().eAdapters().remove(adapter);
+ }
+ }
+}
diff --git a/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/ShardResourceLocator.java b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/ShardResourceLocator.java
new file mode 100644
index 00000000000..397e693707a
--- /dev/null
+++ b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/ShardResourceLocator.java
@@ -0,0 +1,178 @@
+/*****************************************************************************
+ * Copyright (c) 2016 Christian W. Damus and others.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Christian W. Damus - Initial API and implementation
+ *
+ *****************************************************************************/
+
+package org.eclipse.papyrus.infra.emf.resource;
+
+import static org.eclipse.papyrus.infra.emf.internal.resource.InternalIndexUtil.getSemanticModelFileExtensions;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.function.Supplier;
+
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.emf.common.util.TreeIterator;
+import org.eclipse.emf.common.util.URI;
+import org.eclipse.emf.ecore.EObject;
+import org.eclipse.emf.ecore.EReference;
+import org.eclipse.emf.ecore.InternalEObject;
+import org.eclipse.emf.ecore.resource.Resource;
+import org.eclipse.emf.ecore.resource.ResourceSet;
+import org.eclipse.emf.ecore.resource.impl.ResourceSetImpl;
+import org.eclipse.emf.ecore.resource.impl.ResourceSetImpl.ResourceLocator;
+import org.eclipse.emf.ecore.util.EcoreUtil;
+import org.eclipse.emf.ecore.util.InternalEList;
+import org.eclipse.papyrus.infra.emf.Activator;
+
+/**
+ * A {@link ResourceLocator} that can be used with any {@link ResourceSet}
+ * to ensure that when a shard resource is demand-loaded by proxy resolution,
+ * it is loaded from the top down to ensure that dependencies such as profile
+ * applications in UML models are ensured before loading the shard.
+ *
+ * @since 2.1
+ */
+public class ShardResourceLocator extends ResourceLocator {
+
+ private final Set inDemandLoadHelper = new HashSet<>();
+
+ private final Supplier extends ICrossReferenceIndex> index;
+
+ private final Set semanticModelExtensions;
+
+ /**
+ * Installs me in the given resource set. I use the best available
+ * {@link ICrossReferenceIndex} for resolution of shard relationships.
+ *
+ * @param resourceSet
+ * the resource set for which I shall provide
+ */
+ public ShardResourceLocator(ResourceSetImpl resourceSet) {
+ this(resourceSet, () -> ICrossReferenceIndex.getInstance(resourceSet));
+ }
+
+ /**
+ * Installs me in the given resource set with a particular {@code index}.
+ *
+ * @param resourceSet
+ * the resource set for which I shall provide
+ * @param index
+ * the index to use for resolving shard relationships
+ */
+ public ShardResourceLocator(ResourceSetImpl resourceSet, ICrossReferenceIndex index) {
+ this(resourceSet, () -> index);
+ }
+
+ /**
+ * Installs me in the given resource set with a dynamic {@code index} supplier.
+ *
+ * @param resourceSet
+ * the resource set for which I shall provide
+ * @param index
+ * a dynamic supplier of the index to use for resolving shard relationships
+ */
+ public ShardResourceLocator(ResourceSetImpl resourceSet, Supplier extends ICrossReferenceIndex> index) {
+ super(resourceSet);
+
+ this.index = index;
+ this.semanticModelExtensions = getSemanticModelFileExtensions(resourceSet);
+ }
+
+ /**
+ * Handles shard resources by loading their roots first and the chain(s) of resources
+ * all the way down to the shard.
+ */
+ @Override
+ public Resource getResource(URI uri, boolean loadOnDemand) {
+ if (loadOnDemand && uri.isPlatformResource()
+ && semanticModelExtensions.contains(uri.fileExtension())) {
+
+ // Is it already loaded? This saves blocking on the cross-reference index
+ Resource existing = getResource(uri, false);
+ if ((existing == null) || !existing.isLoaded()) {
+ // Do our peculiar process
+ handleShard(uri);
+ }
+ }
+
+ return basicGetResource(uri, loadOnDemand);
+ }
+
+ /**
+ * Handles the case of demand-loading of a shard by loading it from the root resource
+ * on down.
+ *
+ * @param uri
+ * the URI of a resource that may be a shard
+ */
+ protected void handleShard(URI uri) {
+ try {
+ Set parents = index.get().getParents(uri);
+
+ if (!parents.isEmpty()) {
+ // Load from the root resource down
+ parents.stream()
+ .filter(this::notLoaded)
+ .forEach(r -> loadParentResource(r, uri));
+ }
+ } catch (CoreException e) {
+ Activator.log.log(e.getStatus());
+ }
+ }
+
+ protected boolean notLoaded(URI uri) {
+ Resource resource = resourceSet.getResource(uri, false);
+ return (resource == null) || !resource.isLoaded();
+ }
+
+ protected void loadParentResource(URI parentURI, URI shard) {
+ // This operates recursively on the demand-load helper
+ Resource parent = resourceSet.getResource(parentURI, true);
+
+ // Unlock the shardresource, now
+ inDemandLoadHelper.remove(shard);
+
+ // Scan for the cross-resource containment
+ URI shardURI = normalize(shard);
+ for (TreeIterator iter = EcoreUtil.getAllProperContents(parent, false); iter.hasNext();) {
+ EObject next = iter.next();
+ if (next.eIsProxy()) {
+ // Must always only compare normalized URIs to determine 'same resource'
+ URI proxyURI = normalize(((InternalEObject) next).eProxyURI());
+ if (proxyURI.trimFragment().equals(shardURI)) {
+ // This is our parent object
+ EObject parentObject = next.eContainer();
+
+ // Resolve the reference
+ EReference containment = next.eContainmentFeature();
+ if (!containment.isMany()) {
+ // Easy case
+ parentObject.eGet(containment, true);
+ } else {
+ InternalEList> list = (InternalEList>) parentObject.eGet(containment);
+ int index = list.basicIndexOf(next);
+ if (index >= 0) {
+ // Resolve it
+ list.get(index);
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ protected URI normalize(URI uri) {
+ return resourceSet.getURIConverter().normalize(uri);
+ }
+
+}
diff --git a/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/index/IWorkspaceModelIndexProvider.java b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/index/IWorkspaceModelIndexProvider.java
new file mode 100644
index 00000000000..fb18c57198b
--- /dev/null
+++ b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/index/IWorkspaceModelIndexProvider.java
@@ -0,0 +1,27 @@
+/*****************************************************************************
+ * Copyright (c) 2016 Christian W. Damus and others.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Christian W. Damus - Initial API and implementation
+ *
+ *****************************************************************************/
+
+package org.eclipse.papyrus.infra.emf.resource.index;
+
+import java.util.function.Supplier;
+
+/**
+ * A provider of a model index on the org.eclipse.papyrus.infra.emf.index
+ * extension point.
+ *
+ * @since 2.1
+ */
+@FunctionalInterface
+public interface IWorkspaceModelIndexProvider extends Supplier> {
+ // Nothing to add
+}
diff --git a/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/index/WorkspaceModelIndex.java b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/index/WorkspaceModelIndex.java
index 98e6b063472..91b17fc5ef3 100644
--- a/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/index/WorkspaceModelIndex.java
+++ b/plugins/infra/emf/org.eclipse.papyrus.infra.emf/src/org/eclipse/papyrus/infra/emf/resource/index/WorkspaceModelIndex.java
@@ -1,5 +1,5 @@
/*****************************************************************************
- * Copyright (c) 2014, 2015 Christian W. Damus and others.
+ * Copyright (c) 2014, 2016 Christian W. Damus and others.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
@@ -15,63 +15,41 @@ package org.eclipse.papyrus.infra.emf.resource.index;
import java.io.IOException;
import java.io.InputStream;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Deque;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.OutputStream;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.resources.IResource;
-import org.eclipse.core.resources.IResourceChangeEvent;
-import org.eclipse.core.resources.IResourceChangeListener;
-import org.eclipse.core.resources.IResourceDelta;
-import org.eclipse.core.resources.IResourceDeltaVisitor;
-import org.eclipse.core.resources.IResourceVisitor;
-import org.eclipse.core.resources.IWorkspace;
+import org.eclipse.core.resources.IWorkspaceRoot;
import org.eclipse.core.resources.ResourcesPlugin;
import org.eclipse.core.runtime.CoreException;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.IPath;
+import org.eclipse.core.runtime.Path;
import org.eclipse.core.runtime.Platform;
import org.eclipse.core.runtime.QualifiedName;
-import org.eclipse.core.runtime.Status;
-import org.eclipse.core.runtime.SubMonitor;
import org.eclipse.core.runtime.content.IContentType;
-import org.eclipse.core.runtime.content.IContentTypeManager;
-import org.eclipse.core.runtime.jobs.IJobChangeEvent;
-import org.eclipse.core.runtime.jobs.IJobChangeListener;
-import org.eclipse.core.runtime.jobs.Job;
-import org.eclipse.core.runtime.jobs.JobChangeAdapter;
-import org.eclipse.osgi.util.NLS;
-import org.eclipse.papyrus.infra.core.utils.JobBasedFuture;
-import org.eclipse.papyrus.infra.core.utils.JobExecutorService;
import org.eclipse.papyrus.infra.emf.Activator;
-import org.eclipse.papyrus.infra.tools.util.ReferenceCounted;
+import org.eclipse.papyrus.infra.emf.internal.resource.index.IIndexSaveParticipant;
+import org.eclipse.papyrus.infra.emf.internal.resource.index.IndexManager;
+import org.eclipse.papyrus.infra.emf.internal.resource.index.IndexPersistenceManager;
+import org.eclipse.papyrus.infra.emf.internal.resource.index.InternalModelIndex;
import com.google.common.base.Function;
-import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Queues;
import com.google.common.collect.SetMultimap;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
@@ -79,58 +57,86 @@ import com.google.common.util.concurrent.ListenableFuture;
/**
* A general-purpose index of model resources in the Eclipse workspace.
*/
-public class WorkspaceModelIndex {
- private static final int MAX_INDEX_RETRIES = 3;
+public class WorkspaceModelIndex extends InternalModelIndex {
+ private static final long INDEX_RECORD_SERIAL_VERSION = 1L;
private final IndexHandler extends T> indexer;
+ private final PersistentIndexHandler pIndexer;
- private final QualifiedName indexKey;
+ private final String indexName;
private final IContentType contentType;
+ private final IWorkspaceRoot wsRoot = ResourcesPlugin.getWorkspace().getRoot();
private final SetMultimap index = HashMultimap.create();
- private final IResourceChangeListener workspaceListener = new WorkspaceListener();
- private final Map activeJobs = Maps.newHashMap();
- private final ContentTypeService contentTypeService;
private final Set fileExtensions;
-
- private final JobWrangler jobWrangler;
-
- private final CopyOnWriteArrayList listeners = Lists.newCopyOnWriteArrayList();
+ private boolean started;
public WorkspaceModelIndex(String name, String contentType, IndexHandler extends T> indexer) {
this(name, contentType, indexer, 0);
}
public WorkspaceModelIndex(String name, String contentType, IndexHandler extends T> indexer, int maxConcurrentJobs) {
- super();
+ this(name, contentType,
+ Platform.getContentTypeManager().getContentType(contentType).getFileSpecs(IContentType.FILE_EXTENSION_SPEC),
+ indexer, maxConcurrentJobs);
+ }
+
+ /**
+ * @since 2.1
+ */
+ public WorkspaceModelIndex(String name, String contentType, String[] fileExtensions, IndexHandler extends T> indexer, int maxConcurrentJobs) {
+ this(name, contentType, fileExtensions, indexer, null, maxConcurrentJobs);
+ }
+
+ /**
+ * @since 2.1
+ */
+ public WorkspaceModelIndex(String name, String contentType, PersistentIndexHandler indexer) {
+ this(name, contentType, indexer, 0);
+ }
+
+ /**
+ * @since 2.1
+ */
+ public WorkspaceModelIndex(String name, String contentType, PersistentIndexHandler indexer, int maxConcurrentJobs) {
+ this(name, contentType,
+ Platform.getContentTypeManager().getContentType(contentType).getFileSpecs(IContentType.FILE_EXTENSION_SPEC),
+ indexer, maxConcurrentJobs);
+ }
- this.indexKey = new QualifiedName("org.eclipse.papyrus.modelindex", name); //$NON-NLS-1$
+ /**
+ * @since 2.1
+ */
+ public WorkspaceModelIndex(String name, String contentType, String[] fileExtensions, PersistentIndexHandler indexer, int maxConcurrentJobs) {
+ this(name, contentType, fileExtensions, indexer, indexer, maxConcurrentJobs);
+ }
+
+ private WorkspaceModelIndex(String name, String contentType, String[] fileExtensions, IndexHandler extends T> indexer, PersistentIndexHandler pIndexer, int maxConcurrentJobs) {
+ super(new QualifiedName(Activator.PLUGIN_ID, "index:" + name), maxConcurrentJobs); //$NON-NLS-1$
+
+ this.indexName = name;
this.contentType = Platform.getContentTypeManager().getContentType(contentType);
this.indexer = indexer;
+ this.pIndexer = pIndexer;
- String[] fileSpecs = this.contentType.getFileSpecs(IContentType.FILE_EXTENSION_SPEC);
- if ((fileSpecs != null) && (fileSpecs.length > 0)) {
- fileExtensions = ImmutableSet.copyOf(fileSpecs);
+ if ((fileExtensions != null) && (fileExtensions.length > 0)) {
+ this.fileExtensions = ImmutableSet.copyOf(fileExtensions);
} else {
- fileExtensions = null;
+ this.fileExtensions = null;
}
-
- contentTypeService = ContentTypeService.getInstance();
- jobWrangler = new JobWrangler(maxConcurrentJobs);
-
- startIndex();
}
+ @Override
public void dispose() {
- ResourcesPlugin.getWorkspace().removeResourceChangeListener(workspaceListener);
- Job.getJobManager().cancel(this);
- ContentTypeService.dispose(contentTypeService);
+ if (pIndexer != null) {
+ IndexPersistenceManager.INSTANCE.removeIndex(this);
+ }
synchronized (index) {
for (IFile next : index.values()) {
try {
- next.setSessionProperty(indexKey, null);
+ next.setSessionProperty(getIndexKey(), null);
} catch (CoreException e) {
// Just continue, best-effort. There's nothing else to do
}
@@ -140,23 +146,153 @@ public class WorkspaceModelIndex {
}
}
- private void startIndex() {
- IWorkspace workspace = ResourcesPlugin.getWorkspace();
- workspace.addResourceChangeListener(workspaceListener, IResourceChangeEvent.POST_CHANGE);
+ /**
+ * @since 2.1
+ */
+ @Override
+ protected final void start() {
+ if (started) {
+ throw new IllegalStateException("index already started: " + getName()); //$NON-NLS-1$
+ }
+ started = true;
- index(Arrays.asList(workspace.getRoot().getProjects()));
+ // If we support persistence, initialize from the store
+ if (pIndexer != null) {
+ InputStream storeInput = IndexPersistenceManager.INSTANCE.addIndex(this, createSaveParticipant());
+ if (storeInput != null) {
+ try {
+ loadIndex(storeInput);
+ } catch (IOException e) {
+ // The input was already closed, if it could be
+ Activator.log.error("Failed to load index data for " + getName(), e); //$NON-NLS-1$
+ }
+ }
+ }
}
- void index(Collection extends IProject> projects) {
- List jobs = Lists.newArrayListWithCapacity(projects.size());
- for (IProject next : projects) {
- jobs.add(new IndexProjectJob(next));
+ private void loadIndex(InputStream storeInput) throws IOException {
+ List store = loadStore(storeInput);
+
+ synchronized (index) {
+ for (IndexRecord record : store) {
+ if (record.file.isAccessible()) {
+ try {
+ record.file.setSessionProperty(getIndexKey(), record);
+ index.put(record.file.getProject(), record.file);
+ } catch (CoreException e) {
+ // Doesn't matter; it will be indexed from scratch, then
+ Activator.log.log(e.getStatus());
+ }
+ }
+ }
}
- schedule(jobs);
}
- void index(IProject project) {
- schedule(new IndexProjectJob(project));
+ private List loadStore(InputStream storeInput) throws IOException {
+ List result = Collections.emptyList();
+
+ try (InputStream outer = storeInput; ObjectInputStream input = createObjectInput(outer)) {
+ // Load the version. So far, we're at the first version
+ long version = input.readLong();
+ if (version != INDEX_RECORD_SERIAL_VERSION) {
+ throw new IOException("Unexpected index record serial version " + version); //$NON-NLS-1$
+ }
+
+ // Read the number of records
+ int count = input.readInt();
+ result = new ArrayList<>(count);
+
+ // Read the records
+ for (int i = 0; i < count; i++) {
+ try {
+ result.add(readIndexRecord(input));
+ } catch (ClassNotFoundException e) {
+ throw new IOException(e);
+ }
+ }
+ }
+
+ return result;
+ }
+
+ private IndexRecord readIndexRecord(ObjectInput in) throws IOException, ClassNotFoundException {
+ // Load the file
+ IPath path = new Path((String) in.readObject());
+ IFile file = wsRoot.getFile(path);
+
+ // Load the index data
+ @SuppressWarnings("unchecked")
+ T index = (T) in.readObject();
+
+ return new IndexRecord(file, index);
+ }
+
+ private IIndexSaveParticipant createSaveParticipant() {
+ return new IIndexSaveParticipant() {
+ @Override
+ public void save(WorkspaceModelIndex> index, OutputStream storeOutput) throws IOException, CoreException {
+ if (index == WorkspaceModelIndex.this) {
+ List store;
+
+ synchronized (index) {
+ store = index.index.values().stream()
+ .filter(IResource::isAccessible)
+ .map(f -> {
+ IndexRecord result = null;
+
+ try {
+ @SuppressWarnings("unchecked")
+ IndexRecord __ = (IndexRecord) f.getSessionProperty(getIndexKey());
+ result = __;
+ } catch (CoreException e) {
+ // Doesn't matter; we'll just index it next time
+ Activator.log.log(e.getStatus());
+ }
+
+ return result;
+ })
+ .collect(Collectors.toList());
+ }
+
+ saveStore(store, storeOutput);
+ }
+ }
+ };
+ }
+
+ private void saveStore(List store, OutputStream storeOutput) throws IOException {
+ try (ObjectOutputStream output = new ObjectOutputStream(storeOutput)) {
+ // Write the version
+ output.writeLong(INDEX_RECORD_SERIAL_VERSION);
+
+ // Write the number of records
+ output.writeInt(store.size());
+
+ // Write the records
+ for (IndexRecord next : store) {
+ writeIndexRecord(next, output);
+ }
+ }
+ }
+
	/**
	 * Writes a single record: the indexed file's full workspace path in portable
	 * string form, followed by the indexer's data object.
	 *
	 * @param record
	 *            the record to write; must not be {@code null}
	 * @param out
	 *            the object output to write to
	 * @throws IOException
	 *             on failure to write
	 */
	private void writeIndexRecord(IndexRecord record, ObjectOutput out) throws IOException {
		out.writeObject(record.file.getFullPath().toPortableString());
		out.writeObject(record.index);
	}
+
+ /**
+ * Obtains the name of this index.
+ *
+ * @return my name
+ * @since 2.1
+ */
+ public final String getName() {
+ return indexName;
+ }
+
+ @Override
+ public String toString() {
+ return String.format("WorkspaceModelIndex(%s)", getName()); //$NON-NLS-1$
}
/**
@@ -174,8 +310,9 @@ public class WorkspaceModelIndex {
}
/**
- * Obtains an asynchronous future result that is scheduled to run after any pending indexing work has completed.
- * The {@code callable} is invoked under synchronization on the index, so it must be careful about how it
+ * Obtains an asynchronous future result that is scheduled to run after any
+ * pending indexing work has completed. The {@code callable} is invoked under
+ * synchronization on the index, so it must be careful about how it
* synchronizes on other objects to avoid deadlocks.
*
* @param callable
@@ -183,39 +320,13 @@ public class WorkspaceModelIndex {
*
* @return the future result of the operation
*/
- public ListenableFuture afterIndex(final Callable callable) {
- ListenableFuture result;
-
- if (Job.getJobManager().find(this).length == 0) {
- // Result is available now
- try {
- result = Futures.immediateFuture(callable.call());
- } catch (Exception e) {
- result = Futures.immediateFailedFuture(e);
+ @Override
+ public ListenableFuture afterIndex(Callable callable) {
+ return super.afterIndex(() -> {
+ synchronized (index) {
+ return callable.call();
}
- } else {
- JobBasedFuture job = new JobBasedFuture(NLS.bind("Wait for model index \"{0}\"", indexKey.getLocalName())) {
- {
- // setSystem(true);
- }
-
- @Override
- protected V compute(IProgressMonitor monitor) throws Exception {
- V result;
-
- Job.getJobManager().join(WorkspaceModelIndex.this, monitor);
- synchronized (index) {
- result = callable.call();
- }
-
- return result;
- }
- };
- job.schedule();
- result = job;
- }
-
- return result;
+ });
}
/**
@@ -259,9 +370,9 @@ public class WorkspaceModelIndex {
for (IFile next : index.values()) {
try {
@SuppressWarnings("unchecked")
- T value = (T) next.getSessionProperty(indexKey);
- if (value != null) {
- result.put(next, value);
+ IndexRecord record = (IndexRecord) next.getSessionProperty(getIndexKey());
+ if (record != null) {
+ result.put(next, record.index);
}
} catch (CoreException e) {
Activator.log.error("Failed to access index data for file " + next.getFullPath(), e); //$NON-NLS-1$
@@ -271,17 +382,32 @@ public class WorkspaceModelIndex {
return result.build();
}
- void process(IFile file) throws CoreException {
+ /**
+ * @since 2.1
+ */
+ @Override
+ protected final void process(IFile file) throws CoreException {
IProject project = file.getProject();
if (match(file)) {
- add(project, file);
+ @SuppressWarnings("unchecked")
+ IndexRecord record = (IndexRecord) file.getSessionProperty(getIndexKey());
+ if ((record == null) || record.isObsolete()) {
+ add(project, file);
+ } else {
+ // If it's not obsolete, then we're loading it from persistent storage
+ init(project, file, record);
+ }
} else {
remove(project, file);
}
}
- boolean match(IFile file) {
+ /**
+ * @since 2.1
+ */
+ @Override
+ protected final boolean match(IFile file) {
boolean result = false;
// Don't even attempt to match the content type if the file extension doesn't match.
@@ -291,7 +417,7 @@ public class WorkspaceModelIndex {
&& ((fileExtensions == null) || fileExtensions.contains(file.getFileExtension()))
&& file.isSynchronized(IResource.DEPTH_ZERO)) {
- IContentType[] contentTypes = contentTypeService.getContentTypes(file);
+ IContentType[] contentTypes = getContentTypes(file);
if (contentTypes != null) {
for (int i = 0; (i < contentTypes.length) && !result; i++) {
result = contentTypes[i].isKindOf(contentType);
@@ -302,180 +428,72 @@ public class WorkspaceModelIndex {
return result;
}
- void add(IProject project, IFile file) throws CoreException {
- synchronized (index) {
- index.put(project, file);
- file.setSessionProperty(indexKey, indexer.index(file));
+ void init(IProject project, IFile file, IndexRecord record) throws CoreException {
+ if (pIndexer.load(file, record.index)) {
+ synchronized (index) {
+ index.put(project, file);
+ file.setSessionProperty(getIndexKey(), record);
+ }
}
}
- void remove(IProject project, IFile file) throws CoreException {
- synchronized (index) {
- index.remove(project, file);
- indexer.unindex(file);
+ void add(IProject project, IFile file) throws CoreException {
+ T data = indexer.index(file);
- if (file.exists()) {
- file.setSessionProperty(indexKey, null);
- }
+ synchronized (index) {
+ index.put(project, file);
+ file.setSessionProperty(getIndexKey(), new IndexRecord(file, data));
}
}
- void remove(IProject project) throws CoreException {
+ /**
+ * @since 2.1
+ */
+ @Override
+ protected final void remove(IProject project, IFile file) throws CoreException {
+ boolean unindex;
+
synchronized (index) {
- if (index.containsKey(project)) {
- for (IFile next : index.get(project)) {
- indexer.unindex(next);
- }
- index.removeAll(project);
- }
+ // Don't need to do any work on the index data if
+ // this wasn't in the index in the first place
+ unindex = index.remove(project, file);
}
- }
- ReindexProjectJob reindex(IProject project, Iterable extends IndexDelta> deltas) {
- ReindexProjectJob result = null;
-
- synchronized (activeJobs) {
- AbstractIndexJob active = activeJobs.get(project);
-
- if (active != null) {
- switch (active.kind()) {
- case REINDEX:
- @SuppressWarnings("unchecked")
- ReindexProjectJob reindex = (ReindexProjectJob) active;
- reindex.addDeltas(deltas);
- break;
- case INDEX:
- @SuppressWarnings("unchecked")
- IndexProjectJob index = (IndexProjectJob) active;
- ReindexProjectJob followup = index.getFollowup();
- if (followup != null) {
- followup.addDeltas(deltas);
- } else {
- followup = new ReindexProjectJob(project, deltas);
- index.setFollowup(followup);
- }
- break;
- case MASTER:
- throw new IllegalStateException("Master job is in the active table."); //$NON-NLS-1$
+ if (unindex) {
+ try {
+ indexer.unindex(file);
+ } finally {
+ if (file.exists()) {
+ file.setSessionProperty(getIndexKey(), null);
}
- } else {
- // No active job. We'll need a new one
- result = new ReindexProjectJob(project, deltas);
}
}
-
- return result;
}
- IResourceVisitor getWorkspaceVisitor(final IProgressMonitor monitor) {
- return new IResourceVisitor() {
-
- @Override
- public boolean visit(IResource resource) throws CoreException {
- if (resource.getType() == IResource.FILE) {
- process((IFile) resource);
- }
-
- return !monitor.isCanceled();
- }
- };
- }
+ /**
+ * @since 2.1
+ */
+ @Override
+ protected final void remove(IProject project) throws CoreException {
+ Set files;
- private void schedule(Collection extends AbstractIndexJob> jobs) {
- // Synchronize on the active jobs because this potentially alters the wrangler's follow-up job
- synchronized (activeJobs) {
- jobWrangler.add(jobs);
+ synchronized (index) {
+ files = index.containsKey(project)
+ ? index.removeAll(project)
+ : null;
}
- }
- private void schedule(AbstractIndexJob job) {
- // Synchronize on the active jobs because this potentially alters the wrangler's follow-up job
- synchronized (activeJobs) {
- jobWrangler.add(job);
+ if (files != null) {
+ files.forEach(indexer::unindex);
}
}
public void addListener(IWorkspaceModelIndexListener listener) {
- listeners.addIfAbsent(listener);
+ IndexManager.getInstance().addListener(this, listener);
}
public void removeListener(IWorkspaceModelIndexListener listener) {
- listeners.remove(listener);
- }
-
- private void notifyStarting(AbstractIndexJob indexJob) {
- if (!listeners.isEmpty()) {
- WorkspaceModelIndexEvent event;
-
- switch (indexJob.kind()) {
- case INDEX:
- event = new WorkspaceModelIndexEvent(this, WorkspaceModelIndexEvent.ABOUT_TO_CALCULATE, indexJob.getProject());
- for (IWorkspaceModelIndexListener next : listeners) {
- try {
- next.indexAboutToCalculate(event);
- } catch (Exception e) {
- Activator.log.error("Uncaught exception in index listsner.", e); //$NON-NLS-1$
- }
- }
- break;
- case REINDEX:
- event = new WorkspaceModelIndexEvent(this, WorkspaceModelIndexEvent.ABOUT_TO_RECALCULATE, indexJob.getProject());
- for (IWorkspaceModelIndexListener next : listeners) {
- try {
- next.indexAboutToRecalculate(event);
- } catch (Exception e) {
- Activator.log.error("Uncaught exception in index listsner.", e); //$NON-NLS-1$
- }
- }
- break;
- case MASTER:
- // Pass
- break;
- }
- }
- }
-
- private void notifyFinished(AbstractIndexJob indexJob, IStatus status) {
- if (!listeners.isEmpty()) {
- WorkspaceModelIndexEvent event;
-
- if ((status != null) && (status.getSeverity() >= IStatus.ERROR)) {
- event = new WorkspaceModelIndexEvent(this, WorkspaceModelIndexEvent.FAILED, indexJob.getProject());
- for (IWorkspaceModelIndexListener next : listeners) {
- try {
- next.indexFailed(event);
- } catch (Exception e) {
- Activator.log.error("Uncaught exception in index listsner.", e); //$NON-NLS-1$
- }
- }
- } else {
- switch (indexJob.kind()) {
- case INDEX:
- event = new WorkspaceModelIndexEvent(this, WorkspaceModelIndexEvent.CALCULATED, indexJob.getProject());
- for (IWorkspaceModelIndexListener next : listeners) {
- try {
- next.indexCalculated(event);
- } catch (Exception e) {
- Activator.log.error("Uncaught exception in index listsner.", e); //$NON-NLS-1$
- }
- }
- break;
- case REINDEX:
- event = new WorkspaceModelIndexEvent(this, WorkspaceModelIndexEvent.RECALCULATED, indexJob.getProject());
- for (IWorkspaceModelIndexListener next : listeners) {
- try {
- next.indexRecalculated(event);
- } catch (Exception e) {
- Activator.log.error("Uncaught exception in index listsner.", e); //$NON-NLS-1$
- }
- }
- break;
- case MASTER:
- // Pass
- break;
- }
- }
- }
+ IndexManager.getInstance().removeListener(this, listener);
}
//
@@ -505,537 +523,48 @@ public class WorkspaceModelIndex {
void unindex(IFile file);
}
- private enum JobKind {
- MASTER, INDEX, REINDEX;
-
- boolean isSystem() {
- return this != MASTER;
- }
- }
-
- private abstract class AbstractIndexJob extends Job {
- private final IProject project;
-
- private volatile Semaphore permit;
-
- AbstractIndexJob(String name, IProject project) {
- super(name);
-
- this.project = project;
- this.permit = permit;
-
- if (project != null) {
- setRule(project);
- synchronized (activeJobs) {
- if (!activeJobs.containsKey(project)) {
- activeJobs.put(project, this);
- }
- }
- }
-
- setSystem(kind().isSystem());
- }
-
- @Override
- public boolean belongsTo(Object family) {
- return family == WorkspaceModelIndex.this;
- }
-
- final IProject getProject() {
- return project;
- }
-
- abstract JobKind kind();
-
- @Override
- protected final IStatus run(IProgressMonitor monitor) {
- IStatus result;
-
- try {
- result = doRun(monitor);
- } finally {
- synchronized (activeJobs) {
- AbstractIndexJob followup = getFollowup();
-
- if (project != null) {
- if (followup == null) {
- activeJobs.remove(project);
- } else {
- activeJobs.put(project, followup);
- }
- }
-
- if (followup != null) {
- // Kick off the follow-up job
- WorkspaceModelIndex.this.schedule(followup);
- }
- }
- }
-
- return result;
- }
-
- final Semaphore getPermit() {
- return permit;
- }
-
- final void setPermit(Semaphore permit) {
- this.permit = permit;
- }
-
- protected abstract IStatus doRun(IProgressMonitor monitor);
-
- protected AbstractIndexJob getFollowup() {
- return null;
- }
- }
-
- private class JobWrangler extends AbstractIndexJob {
- private final Lock lock = new ReentrantLock();
-
- private final Deque<AbstractIndexJob> queue = Queues.newArrayDeque();
-
- private final AtomicBoolean active = new AtomicBoolean();
- private final Semaphore indexJobSemaphore;
-
- JobWrangler(int maxConcurrentJobs) {
- super("Workspace model indexer", null);
-
- indexJobSemaphore = new Semaphore((maxConcurrentJobs <= 0) ? Integer.MAX_VALUE : maxConcurrentJobs);
- }
-
- @Override
- JobKind kind() {
- return JobKind.MASTER;
- }
-
- void add(AbstractIndexJob job) {
- lock.lock();
-
- try {
- scheduleIfNeeded();
- queue.add(job);
- } finally {
- lock.unlock();
- }
- }
-
- private void scheduleIfNeeded() {
- if (active.compareAndSet(false, true)) {
- // I am a new job
- schedule();
- }
- }
-
- void add(Iterable<? extends AbstractIndexJob> jobs) {
- lock.lock();
-
- try {
- for (AbstractIndexJob next : jobs) {
- add(next);
- }
- } finally {
- lock.unlock();
- }
- }
-
- @Override
- protected IStatus doRun(IProgressMonitor progressMonitor) {
- final AtomicInteger pending = new AtomicInteger(); // How many permits have we issued?
- final Condition pendingChanged = lock.newCondition();
-
- final SubMonitor monitor = SubMonitor.convert(progressMonitor, IProgressMonitor.UNKNOWN);
-
- IStatus result = Status.OK_STATUS;
-
- IJobChangeListener listener = new JobChangeAdapter() {
- private final Map<IProject, Integer> retries = Maps.newHashMap();
-
- private Semaphore getIndexJobPermit(Job job) {
- return (job instanceof WorkspaceModelIndex<?>.AbstractIndexJob)
- ? ((WorkspaceModelIndex<?>.AbstractIndexJob) job).getPermit()
- : null;
- }
-
- @Override
- public void aboutToRun(IJobChangeEvent event) {
- Job starting = event.getJob();
-
- if (getIndexJobPermit(starting) == indexJobSemaphore) {
- // one of mine is starting
- @SuppressWarnings("unchecked")
- AbstractIndexJob indexJob = (AbstractIndexJob) starting;
- notifyStarting(indexJob);
- }
- }
-
- @Override
- public void done(IJobChangeEvent event) {
- final Job finished = event.getJob();
- if (getIndexJobPermit(finished) == indexJobSemaphore) {
- try {
- // one of mine has finished
- @SuppressWarnings("unchecked")
- AbstractIndexJob indexJob = (AbstractIndexJob) finished;
- IProject project = indexJob.getProject();
-
- notifyFinished(indexJob, event.getResult());
-
- if (project != null) {
- synchronized (retries) {
- if ((event.getResult() != null) && (event.getResult().getSeverity() >= IStatus.ERROR)) {
- // Indexing failed to complete. Need to re-build the index
- int count = retries.containsKey(project) ? retries.get(project) : 0;
- if (count++ < MAX_INDEX_RETRIES) {
- // Only retry up to three times
- index(project);
- }
- retries.put(project, ++count);
- } else {
- // Successful re-indexing. Forget the retries
- retries.remove(project);
- }
- }
- }
- } finally {
- // Release this job's permit for the next one in the queue
- indexJobSemaphore.release();
-
- // And it's no longer pending
- pending.decrementAndGet();
-
- lock.lock();
- try {
- pendingChanged.signalAll();
- } finally {
- lock.unlock();
- }
- }
- }
- }
- };
-
- getJobManager().addJobChangeListener(listener);
-
- lock.lock();
-
- try {
- out: for (;;) {
- for (AbstractIndexJob next = queue.poll(); next != null; next = queue.poll()) {
- lock.unlock();
- try {
- if (monitor.isCanceled()) {
- Thread.currentThread().interrupt();
- }
-
- // Enforce the concurrent jobs limit
- indexJobSemaphore.acquire();
- next.setPermit(indexJobSemaphore);
- pending.incrementAndGet();
-
- // Now go
- next.schedule();
- } catch (InterruptedException e) {
- // We were cancelled. Push this job back and re-schedule
- lock.lock();
- try {
- queue.addFirst(next);
- } finally {
- lock.unlock();
- }
- result = Status.CANCEL_STATUS;
- break out;
- } finally {
- lock.lock();
- }
- }
-
- if ((pending.get() <= 0) && queue.isEmpty()) {
- // Nothing left to wait for
- break out;
- } else if (pending.get() > 0) {
- try {
- if (monitor.isCanceled()) {
- Thread.currentThread().interrupt();
- }
-
- pendingChanged.await();
- } catch (InterruptedException e) {
- // We were cancelled. Re-schedule
- result = Status.CANCEL_STATUS;
- break out;
- }
- }
- }
-
- // We've finished wrangling index jobs, for now
- } finally {
- active.compareAndSet(true, false);
-
- // If we were canceled then we re-schedule after a delay to recover
- if (result == Status.CANCEL_STATUS) {
- // We cannot un-cancel a job, so we must replace ourselves with a new job
- schedule(1000L);
- } else {
- // Double-check
- if (!queue.isEmpty()) {
- // We'll have to go around again
- scheduleIfNeeded();
- }
- }
-
- lock.unlock();
-
- getJobManager().removeJobChangeListener(listener);
- }
-
- return result;
- }
- }
-
- private class IndexProjectJob extends AbstractIndexJob {
- private ReindexProjectJob followup;
-
- IndexProjectJob(IProject project) {
- super("Indexing project " + project.getName(), project);
- }
-
- @Override
- JobKind kind() {
- return JobKind.INDEX;
- }
-
- @Override
- protected IStatus doRun(IProgressMonitor monitor) {
- IStatus result = Status.OK_STATUS;
- final IProject project = getProject();
-
- monitor.beginTask("Indexing models in project " + project.getName(), IProgressMonitor.UNKNOWN);
-
- try {
- if (project.isAccessible()) {
- project.accept(getWorkspaceVisitor(monitor));
- } else {
- remove(project);
- }
-
- if (monitor.isCanceled()) {
- result = Status.CANCEL_STATUS;
- }
- } catch (CoreException e) {
- result = e.getStatus();
- } finally {
- monitor.done();
- }
-
- return result;
- }
-
- void setFollowup(ReindexProjectJob followup) {
- this.followup = followup;
- }
-
- @Override
- protected ReindexProjectJob getFollowup() {
- return followup;
- }
- }
-
- private class WorkspaceListener implements IResourceChangeListener {
- @Override
- public void resourceChanged(IResourceChangeEvent event) {
- final Multimap<IProject, IndexDelta> deltas = ArrayListMultimap.create();
-
- try {
- event.getDelta().accept(new IResourceDeltaVisitor() {
-
- @Override
- public boolean visit(IResourceDelta delta) throws CoreException {
- if (delta.getResource().getType() == IResource.FILE) {
- IFile file = (IFile) delta.getResource();
-
- switch (delta.getKind()) {
- case IResourceDelta.CHANGED:
- if ((delta.getFlags() & (IResourceDelta.SYNC | IResourceDelta.CONTENT | IResourceDelta.REPLACED)) != 0) {
- // Re-index in place
- deltas.put(file.getProject(), new IndexDelta(file, IndexDelta.DeltaKind.REINDEX));
- }
- break;
- case IResourceDelta.REMOVED:
- deltas.put(file.getProject(), new IndexDelta(file, IndexDelta.DeltaKind.UNINDEX));
- break;
- case IResourceDelta.ADDED:
- deltas.put(file.getProject(), new IndexDelta(file, IndexDelta.DeltaKind.INDEX));
- break;
- }
- }
- return true;
- }
- });
- } catch (CoreException e) {
- Activator.log.error("Failed to analyze resource changes for re-indexing.", e); //$NON-NLS-1$
- }
-
- if (!deltas.isEmpty()) {
- List<ReindexProjectJob> jobs = Lists.newArrayListWithCapacity(deltas.keySet().size());
- for (IProject next : deltas.keySet()) {
- ReindexProjectJob reindex = reindex(next, deltas.get(next));
- if (reindex != null) {
- jobs.add(reindex);
- }
- }
- schedule(jobs);
- }
- }
- }
-
- private static class IndexDelta {
- private final IFile file;
-
- private final DeltaKind kind;
-
- IndexDelta(IFile file, DeltaKind kind) {
- this.file = file;
- this.kind = kind;
- }
-
- //
- // Nested types
- //
-
- enum DeltaKind {
- INDEX, REINDEX, UNINDEX
- }
- }
-
- private class ReindexProjectJob extends AbstractIndexJob {
- private final IProject project;
- private final ConcurrentLinkedQueue<IndexDelta> deltas;
-
- ReindexProjectJob(IProject project, Iterable<? extends IndexDelta> deltas) {
- super("Re-indexing project " + project.getName(), project);
- this.project = project;
- this.deltas = Queues.newConcurrentLinkedQueue(deltas);
- }
-
- @Override
- JobKind kind() {
- return JobKind.REINDEX;
- }
-
- void addDeltas(Iterable<? extends IndexDelta> deltas) {
- Iterables.addAll(this.deltas, deltas);
- }
-
- @Override
- protected IStatus doRun(IProgressMonitor monitor) {
- IStatus result = Status.OK_STATUS;
-
- monitor.beginTask("Re-indexing models in project " + project.getName(), IProgressMonitor.UNKNOWN);
-
- try {
- for (IndexDelta next = deltas.poll(); next != null; next = deltas.poll()) {
- if (monitor.isCanceled()) {
- result = Status.CANCEL_STATUS;
- break;
- }
-
- try {
- switch (next.kind) {
- case INDEX:
- case REINDEX:
- process(next.file);
- break;
- case UNINDEX:
- remove(project, next.file);
- break;
- }
- } catch (CoreException e) {
- result = e.getStatus();
- break;
- } finally {
- monitor.worked(1);
- }
- }
- } finally {
- monitor.done();
- }
-
- return result;
- }
-
- @Override
- protected AbstractIndexJob getFollowup() {
- // If I still have work to do, then I am my own follow-up
- return deltas.isEmpty() ? null : this;
- }
+ /**
+ * Extension interface for index handlers that provide persistable index
+ * data associated with each file. This enables storage of the index in
+ * the workspace metadata for quick initialization on start-up, requiring
+ * re-calculation of the index only for files that were changed since the
+ * workspace was last closed.
+ *
+ * @param <T>
+ * the index data store type, which must be {@link Serializable}
+ * @since 2.1
+ */
+ public static interface PersistentIndexHandler<T> extends IndexHandler<T> {
+ /**
+ * Initializes the {@code index} data for a file from the persistent store.
+ *
+ * @param file
+ * a file in the workspace
+ * @param index
+ * its previously stored index
+ *
+ * @return whether the {@code index} data were successfully integrated.
+ * A {@code false} result indicates that the file must be indexed
+ * from scratch
+ */
+ boolean load(IFile file, T index);
}
- private static final class ContentTypeService extends ReferenceCounted<ContentTypeService> {
- private static ContentTypeService instance = null;
-
- private final ExecutorService serialExecution = new JobExecutorService();
-
- private final IContentTypeManager mgr = Platform.getContentTypeManager();
+ private final class IndexRecord {
+ private IFile file;
+ private long generation;
+ private T index;
- private ContentTypeService() {
+ IndexRecord(IFile file, T index) {
super();
- }
-
- synchronized static ContentTypeService getInstance() {
- ContentTypeService result = instance;
- if (result == null) {
- result = new ContentTypeService();
- instance = result;
- }
-
- return result.retain();
- }
-
- synchronized static void dispose(ContentTypeService service) {
- service.release();
- }
-
- @Override
- protected void dispose() {
- serialExecution.shutdownNow();
-
- if (instance == this) {
- instance = null;
- }
+ this.file = file;
+ this.generation = file.getModificationStamp();
+ this.index = index;
}
- IContentType[] getContentTypes(final IFile file) {
- Future<IContentType[]> futureResult = serialExecution.submit(new Callable<IContentType[]>() {
-
- @Override
- public IContentType[] call() {
- IContentType[] result = null;
- InputStream input = null;
-
- if (file.isAccessible()) {
- try {
- input = file.getContents(true);
- result = mgr.findContentTypesFor(input, file.getName());
- } catch (Exception e) {
- Activator.log.error("Failed to index file " + file.getFullPath(), e); //$NON-NLS-1$
- } finally {
- if (input != null) {
- try {
- input.close();
- } catch (IOException e) {
- Activator.log.error("Failed to close indexed file " + file.getFullPath(), e); //$NON-NLS-1$
- }
- }
- }
- }
-
- return result;
- }
- });
-
- return Futures.getUnchecked(futureResult);
+ boolean isObsolete() {
+ return file.getModificationStamp() != generation;
}
}
}
diff --git a/plugins/infra/gmfdiag/org.eclipse.papyrus.infra.gmfdiag.common/META-INF/MANIFEST.MF b/plugins/infra/gmfdiag/org.eclipse.papyrus.infra.gmfdiag.common/META-INF/MANIFEST.MF
index 0869377db87..134a924bbfc 100644
--- a/plugins/infra/gmfdiag/org.eclipse.papyrus.infra.gmfdiag.common/META-INF/MANIFEST.MF
+++ b/plugins/infra/gmfdiag/org.eclipse.papyrus.infra.gmfdiag.common/META-INF/MANIFEST.MF
@@ -68,7 +68,7 @@ Require-Bundle: org.eclipse.emf.ecore.edit;bundle-version="[2.9.0,3.0.0)",
Bundle-Vendor: %providerName
Bundle-ActivationPolicy: lazy
Bundle-ClassPath: .
-Bundle-Version: 2.0.0.qualifier
+Bundle-Version: 2.0.100.qualifier
Bundle-Localization: plugin
Bundle-Name: %pluginName
Bundle-Activator: org.eclipse.papyrus.infra.gmfdiag.common.Activator
diff --git a/plugins/infra/gmfdiag/org.eclipse.papyrus.infra.gmfdiag.common/plugin.xml b/plugins/infra/gmfdiag/org.eclipse.papyrus.infra.gmfdiag.common/plugin.xml
index c5a59ba0347..f9ef42b7cb7 100644
--- a/plugins/infra/gmfdiag/org.eclipse.papyrus.infra.gmfdiag.common/plugin.xml
+++ b/plugins/infra/gmfdiag/org.eclipse.papyrus.infra.gmfdiag.common/plugin.xml
@@ -80,6 +80,10 @@
description="Model for notation"
fileExtension="notation"
required="true">
+
+