Skip to main content
aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Watson2015-04-15 21:13:26 +0000
committerThomas Watson2015-04-15 21:13:26 +0000
commit1a8689effb7e188e7a8fc2af1c1df95c5afb55b8 (patch)
tree2f17b802f4493326aac7d0d96a659d32e5349799
parent85dfd1b51849512b64f5785dfe85f68b6d8fac06 (diff)
downloadrt.equinox.framework-1a8689effb7e188e7a8fc2af1c1df95c5afb55b8.tar.gz
rt.equinox.framework-1a8689effb7e188e7a8fc2af1c1df95c5afb55b8.tar.xz
rt.equinox.framework-1a8689effb7e188e7a8fc2af1c1df95c5afb55b8.zip
Bug 464084 - Update the felix resolver implementation to the latest
-rw-r--r--bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/Candidates.java269
-rw-r--r--bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/ResolverImpl.java417
-rw-r--r--bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/ShadowList.java157
-rw-r--r--bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/CopyOnWriteList.java94
-rw-r--r--bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/CopyOnWriteSet.java110
-rw-r--r--bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMap.java621
-rw-r--r--bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMapList.java45
-rw-r--r--bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMapSet.java45
-rwxr-xr-xbundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/ShadowList.java40
9 files changed, 1341 insertions, 457 deletions
diff --git a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/Candidates.java b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/Candidates.java
index 6b946df8e..e5501d488 100644
--- a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/Candidates.java
+++ b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/Candidates.java
@@ -24,11 +24,19 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
+
+import org.apache.felix.resolver.util.CopyOnWriteSet;
+import org.apache.felix.resolver.util.CopyOnWriteList;
+import org.apache.felix.resolver.util.OpenHashMap;
+import org.apache.felix.resolver.util.OpenHashMapList;
+import org.apache.felix.resolver.util.OpenHashMapSet;
+import org.apache.felix.resolver.util.ShadowList;
import org.osgi.framework.Version;
import org.osgi.framework.namespace.HostNamespace;
import org.osgi.framework.namespace.IdentityNamespace;
@@ -49,9 +57,9 @@ class Candidates
private final Set<Resource> m_mandatoryResources;
// Maps a capability to requirements that match it.
- private final Map<Capability, Set<Requirement>> m_dependentMap;
+ private final OpenHashMapSet<Capability, Requirement> m_dependentMap;
// Maps a requirement to the capability it matches.
- private final Map<Requirement, List<Capability>> m_candidateMap;
+ private final OpenHashMapList<Requirement, Capability> m_candidateMap;
// Maps a bundle revision to its associated wrapped revision; this only happens
// when a revision being resolved has fragments to attach to it.
private final Map<Resource, WrappedResource> m_allWrappedHosts;
@@ -65,23 +73,20 @@ class Candidates
private final Map<Capability, Requirement> m_subtitutableMap;
+ private final OpenHashMapSet<Requirement, Capability> m_path;
+
/**
* Private copy constructor used by the copy() method.
- *
- * @param dependentMap the capability dependency map.
- * @param candidateMap the requirement candidate map.
- * @param hostFragments the fragment map.
- * @param wrappedHosts the wrapped hosts map.
- * @param substitutableMap
*/
private Candidates(
Set<Resource> mandatoryResources,
- Map<Capability, Set<Requirement>> dependentMap,
- Map<Requirement, List<Capability>> candidateMap,
+ OpenHashMapSet<Capability, Requirement> dependentMap,
+ OpenHashMapList<Requirement, Capability> candidateMap,
Map<Resource, WrappedResource> wrappedHosts, Map<Resource, Object> populateResultCache,
boolean fragmentsPresent,
Map<Resource, Boolean> onDemandResources,
- Map<Capability, Requirement> substitutableMap)
+ Map<Capability, Requirement> substitutableMap,
+ OpenHashMapSet<Requirement, Capability> path)
{
m_mandatoryResources = mandatoryResources;
m_dependentMap = dependentMap;
@@ -91,6 +96,7 @@ class Candidates
m_fragmentsPresent = fragmentsPresent;
m_validOnDemandResources = onDemandResources;
m_subtitutableMap = substitutableMap;
+ m_path = path;
}
/**
@@ -99,12 +105,17 @@ class Candidates
public Candidates(Map<Resource, Boolean> validOnDemandResources)
{
m_mandatoryResources = new HashSet<Resource>();
- m_dependentMap = new HashMap<Capability, Set<Requirement>>();
- m_candidateMap = new HashMap<Requirement, List<Capability>>();
+ m_dependentMap = new OpenHashMapSet<Capability, Requirement>();
+ m_candidateMap = new OpenHashMapList<Requirement, Capability>();
m_allWrappedHosts = new HashMap<Resource, WrappedResource>();
- m_populateResultCache = new HashMap<Resource, Object>();
+ m_populateResultCache = new LinkedHashMap<Resource, Object>();
m_validOnDemandResources = validOnDemandResources;
- m_subtitutableMap = new HashMap<Capability, Requirement>();
+ m_subtitutableMap = new LinkedHashMap<Capability, Requirement>();
+ m_path = new OpenHashMapSet<Requirement, Capability>(3);
+ }
+
+ public Object getPath() {
+ return m_path;
}
/**
@@ -173,10 +184,11 @@ class Candidates
/**
* Populates candidates for the specified revision.
*
- * @param state the resolver state used for populating the candidates.
- * @param revision the revision whose candidates should be populated.
+ * @param rc the resolver state used for populating the candidates.
+ * @param resource the revision whose candidates should be populated.
*/
// TODO: FELIX3 - Modify to not be recursive.
+ @SuppressWarnings("unchecked")
private void populateResource(ResolveContext rc, Resource resource) throws ResolutionException
{
// Determine if we've already calculated this revision's candidates.
@@ -222,8 +234,7 @@ class Candidates
else if (cacheValue != null)
{
// Increment and get the cycle count.
- cycleCount = (Integer) (((Object[]) cacheValue)[0] =
- new Integer(((Integer) ((Object[]) cacheValue)[0]).intValue() + 1));
+ cycleCount = (Integer) (((Object[]) cacheValue)[0] = (Integer) ((Object[]) cacheValue)[0] + 1);
// Get the already populated candidates.
localCandidateMap = (Map) ((Object[]) cacheValue)[1];
// Get the remaining requirements.
@@ -236,14 +247,14 @@ class Candidates
if ((remainingReqs == null) && (localCandidateMap == null))
{
// Record cycle count.
- cycleCount = new Integer(0);
+ cycleCount = 0;
// Create a local map for populating candidates first, just in case
// the revision is not resolvable.
- localCandidateMap = new HashMap();
+ localCandidateMap = new HashMap<Requirement, List<Capability>>();
// Create a modifiable list of the revision's requirements.
- remainingReqs = new ArrayList(resource.getRequirements(null));
+ remainingReqs = new ArrayList<Requirement>(resource.getRequirements(null));
// Add these value to the result cache so we know we are
// in the middle of populating candidates for the current
@@ -309,14 +320,30 @@ class Candidates
// If we are exiting from a cycle then decrement
// cycle counter, otherwise record the result.
- if (cycleCount.intValue() > 0)
+ if (cycleCount > 0)
{
- ((Object[]) cacheValue)[0] = new Integer(cycleCount.intValue() - 1);
+ ((Object[]) cacheValue)[0] = cycleCount - 1;
}
- else if (cycleCount.intValue() == 0)
+ else if (cycleCount == 0)
{
// Record that the revision was successfully populated.
m_populateResultCache.put(resource, Boolean.TRUE);
+ // FELIX-4825: Verify candidate map in case of cycles and optional requirements
+ for (Iterator<Map.Entry<Requirement, List<Capability>>> it = localCandidateMap.entrySet().iterator(); it.hasNext();)
+ {
+ Map.Entry<Requirement, List<Capability>> entry = it.next();
+ for (Iterator<Capability> it2 = entry.getValue().iterator(); it2.hasNext();)
+ {
+ if (m_populateResultCache.get(it2.next().getResource()) instanceof ResolutionException)
+ {
+ it2.remove();
+ }
+ }
+ if (entry.getValue().isEmpty())
+ {
+ it.remove();
+ }
+ }
// Merge local candidate map into global candidate map.
if (localCandidateMap.size() > 0)
{
@@ -369,11 +396,11 @@ class Candidates
{
return;
}
- Map<String, Collection<Capability>> exportNames = new HashMap<String, Collection<Capability>>();
+ Map<String, List<Capability>> exportNames = new LinkedHashMap<String, List<Capability>>();
for (Capability packageExport : packageExports)
{
String packageName = (String) packageExport.getAttributes().get(PackageNamespace.PACKAGE_NAMESPACE);
- Collection<Capability> caps = exportNames.get(packageName);
+ List<Capability> caps = exportNames.get(packageName);
if (caps == null)
{
caps = new ArrayList<Capability>(1);
@@ -388,7 +415,7 @@ class Candidates
if (substitutes != null && !substitutes.isEmpty())
{
String packageName = (String) substitutes.iterator().next().getAttributes().get(PackageNamespace.PACKAGE_NAMESPACE);
- Collection<Capability> exportedPackages = exportNames.get(packageName);
+ List<Capability> exportedPackages = exportNames.get(packageName);
if (exportedPackages != null)
{
// The package is exported;
@@ -417,7 +444,7 @@ class Candidates
void checkSubstitutes(List<Candidates> importPermutations) throws ResolutionException
{
- Map<Capability, Integer> substituteStatuses = new HashMap<Capability, Integer>(m_subtitutableMap.size());
+ Map<Capability, Integer> substituteStatuses = new LinkedHashMap<Capability, Integer>(m_subtitutableMap.size());
for (Capability substitutable : m_subtitutableMap.keySet())
{
// initialize with unprocessed
@@ -444,7 +471,7 @@ class Candidates
Requirement substitutedReq = m_subtitutableMap.get(substituteStatus.getKey());
if (substitutedReq != null)
{
- ResolverImpl.permutateIfNeeded(this, substitutedReq, importPermutations);
+ permutateIfNeeded(substitutedReq, importPermutations);
}
Set<Requirement> dependents = m_dependentMap.get(substituteStatus.getKey());
if (dependents != null)
@@ -480,7 +507,7 @@ class Candidates
{
if (Util.isOptional(dependent))
{
- clearCandidates(dependent);
+ m_candidateMap.remove(dependent);
}
else
{
@@ -503,7 +530,7 @@ class Candidates
return false;
}
- switch (substituteState.intValue())
+ switch (substituteState)
{
case PROCESSING:
// found a cycle mark the initiator as not substituted
@@ -529,9 +556,8 @@ class Candidates
List<Capability> substitutes = m_candidateMap.get(substitutableReq);
if (substitutes != null)
{
- for (Iterator<Capability> iSubstitutes = substitutes.iterator(); iSubstitutes.hasNext();)
+ for (Capability substituteCandidate : substitutes)
{
- Capability substituteCandidate = iSubstitutes.next();
if (substituteCandidate.getResource().equals(substitutableCap.getResource()))
{
substituteStatuses.put(substitutableCap, EXPORTED);
@@ -585,8 +611,8 @@ class Candidates
* fragments, since fragment capabilities only appear once, but technically
* each host represents a unique capability.
*
- * @param state the resolver state.
- * @param revision the revision being resolved.
+ * @param rc the resolver state.
+ * @param resource the revision being resolved.
* @param candidates the candidates to process.
* @return a resolve exception to be re-thrown, if any, or null.
*/
@@ -739,7 +765,7 @@ class Candidates
}
// Record the candidates.
- m_candidateMap.put(req, candidates);
+ m_candidateMap.put(req, new CopyOnWriteList<Capability>(candidates));
}
/**
@@ -781,12 +807,54 @@ class Candidates
*/
public List<Capability> getCandidates(Requirement req)
{
- return m_candidateMap.get(req);
+ List<Capability> candidates = m_candidateMap.get(req);
+ if (candidates != null)
+ {
+ return Collections.unmodifiableList(candidates);
+ }
+ return null;
+ }
+
+ public Capability getFirstCandidate(Requirement req)
+ {
+ List<Capability> candidates = m_candidateMap.get(req);
+ if (candidates != null && !candidates.isEmpty())
+ {
+ return m_candidateMap.get(req).get(0);
+ }
+ return null;
}
- public void clearCandidates(Requirement req)
+ public void removeFirstCandidate(Requirement req)
{
- m_candidateMap.remove(req);
+ List<Capability> candidates = m_candidateMap.get(req);
+ // Remove the conflicting candidate.
+ Capability cap = candidates.remove(0);
+ if (candidates.isEmpty())
+ {
+ m_candidateMap.remove(req);
+ }
+ // Update resolution path
+ CopyOnWriteSet<Capability> capPath = m_path.get(req);
+ if (capPath == null) {
+ capPath = new CopyOnWriteSet<Capability>();
+ m_path.put(req, capPath);
+ }
+ capPath.add(cap);
+ }
+
+ public List<Capability> clearCandidates(Requirement req, Collection<Capability> caps)
+ {
+ List<Capability> l = m_candidateMap.get(req);
+ l.removeAll(caps);
+ // Update resolution path
+ CopyOnWriteSet<Capability> capPath = m_path.get(req);
+ if (capPath == null) {
+ capPath = new CopyOnWriteSet<Capability>();
+ m_path.put(req, capPath);
+ }
+ capPath.addAll(caps);
+ return l;
}
/**
@@ -803,7 +871,7 @@ class Candidates
* satisfied by the fragment will end up having the two hosts as potential
* candidates, rather than the single fragment.
*
- * @throws ResolutionException if the removal of any unselected fragments
+ * @throws org.osgi.service.resolver.ResolutionException if the removal of any unselected fragments
* result in the root module being unable to resolve.
*/
public void prepare(ResolveContext rc) throws ResolutionException
@@ -812,7 +880,7 @@ class Candidates
// the fragment map maps a fragment symbolic name to a map that maps
// a version to a list of fragments requirements matching that symbolic
// name and version.
- Map<Capability, Map<String, Map<Version, List<Requirement>>>> hostFragments = Collections.EMPTY_MAP;
+ Map<Capability, Map<String, Map<Version, List<Requirement>>>> hostFragments = Collections.emptyMap();
if (m_fragmentsPresent)
{
hostFragments = populateDependents();
@@ -908,10 +976,10 @@ class Candidates
// from the dependent map, but you can't since it may come from
// a fragment that is attached to multiple hosts, so each host
// will need to make their own copy.
- Set<Requirement> dependents = m_dependentMap.get(origCap);
+ CopyOnWriteSet<Requirement> dependents = m_dependentMap.get(origCap);
if (dependents != null)
{
- dependents = new HashSet<Requirement>(dependents);
+ dependents = new CopyOnWriteSet<Requirement>(dependents);
m_dependentMap.put(c, dependents);
for (Requirement r : dependents)
{
@@ -945,8 +1013,7 @@ class Candidates
List<Capability> cands = m_candidateMap.get(r);
if (!(cands instanceof ShadowList))
{
- ShadowList<Capability> shadow =
- new ShadowList<Capability>(cands);
+ ShadowList<Capability> shadow = new ShadowList<Capability>(cands);
m_candidateMap.put(r, shadow);
cands = shadow;
}
@@ -956,7 +1023,7 @@ class Candidates
// shadow copy of the list accordingly.
if (!origCap.getResource().equals(hostResource.getDeclaredResource()))
{
- List<Capability> original = ((ShadowList) cands).getOriginal();
+ List<Capability> original = ((ShadowList<Capability>) cands).getOriginal();
int removeIdx = original.indexOf(origCap);
if (removeIdx != -1)
{
@@ -989,7 +1056,7 @@ class Candidates
List<Capability> cands = m_candidateMap.get(origReq);
if (cands != null)
{
- m_candidateMap.put(r, new ArrayList<Capability>(cands));
+ m_candidateMap.put(r, new CopyOnWriteList<Capability>(cands));
for (Capability cand : cands)
{
Set<Requirement> dependents = m_dependentMap.get(cand);
@@ -1012,6 +1079,9 @@ class Candidates
}
populateSubstitutables();
+
+ m_candidateMap.concat();
+ m_dependentMap.concat();
}
// Maps a host capability to a map containing its potential fragments;
@@ -1022,17 +1092,17 @@ class Candidates
{
Map<Capability, Map<String, Map<Version, List<Requirement>>>> hostFragments =
new HashMap<Capability, Map<String, Map<Version, List<Requirement>>>>();
- for (Entry<Requirement, List<Capability>> entry : m_candidateMap.entrySet())
+ for (Entry<Requirement, CopyOnWriteList<Capability>> entry : m_candidateMap.entrySet())
{
Requirement req = entry.getKey();
List<Capability> caps = entry.getValue();
for (Capability cap : caps)
{
// Record the requirement as dependent on the capability.
- Set<Requirement> dependents = m_dependentMap.get(cap);
+ CopyOnWriteSet<Requirement> dependents = m_dependentMap.get(cap);
if (dependents == null)
{
- dependents = new HashSet<Requirement>();
+ dependents = new CopyOnWriteSet<Requirement>();
m_dependentMap.put(cap, dependents);
}
dependents.add(req);
@@ -1078,8 +1148,8 @@ class Candidates
* become unresolved if they depended on the module's capabilities and there
* is no other candidate.
*
- * @param revision the module to remove.
- * @throws ResolveException if removing the module caused the resolve to
+ * @param resource the module to remove.
+ * @throws ResolutionException if removing the module caused the resolve to
* fail.
*/
private void removeResource(Resource resource, ResolutionException ex)
@@ -1105,11 +1175,11 @@ class Candidates
* involves removing its requirements and its capabilities. This may cause
* other modules to become unresolved as a result.
*
- * @param br the module to remove.
- * @param unresolvedRevisions a list to containing any additional modules
+ * @param resource the module to remove.
+ * @param unresolvedResources a list to containing any additional modules
* that that became unresolved as a result of removing this module and will
* also need to be removed.
- * @throws ResolveException if removing the module caused the resolve to
+ * @throws ResolutionException if removing the module caused the resolve to
* fail.
*/
private void remove(Resource resource, Set<Resource> unresolvedResources)
@@ -1133,8 +1203,6 @@ class Candidates
*/
private void remove(Requirement req)
{
- boolean isFragment = req.getNamespace().equals(HostNamespace.HOST_NAMESPACE);
-
List<Capability> candidates = m_candidateMap.remove(req);
if (candidates != null)
{
@@ -1154,10 +1222,10 @@ class Candidates
* other modules to become unresolved as a result.
*
* @param c the capability to remove.
- * @param unresolvedRevisions a list to containing any additional modules
+ * @param unresolvedResources a list to containing any additional modules
* that that became unresolved as a result of removing this module and will
* also need to be removed.
- * @throws ResolveException if removing the module caused the resolve to
+ * @throws ResolutionException if removing the module caused the resolve to
* fail.
*/
private void remove(Capability c, Set<Resource> unresolvedResources)
@@ -1195,35 +1263,23 @@ class Candidates
*/
public Candidates copy()
{
- Map<Capability, Set<Requirement>> dependentMap =
- new HashMap<Capability, Set<Requirement>>();
- for (Entry<Capability, Set<Requirement>> entry : m_dependentMap.entrySet())
- {
- Set<Requirement> dependents = new HashSet<Requirement>(entry.getValue());
- dependentMap.put(entry.getKey(), dependents);
- }
-
- Map<Requirement, List<Capability>> candidateMap =
- new HashMap<Requirement, List<Capability>>();
- for (Entry<Requirement, List<Capability>> entry
- : m_candidateMap.entrySet())
- {
- List<Capability> candidates =
- new ArrayList<Capability>(entry.getValue());
- candidateMap.put(entry.getKey(), candidates);
- }
-
return new Candidates(
- m_mandatoryResources, dependentMap, candidateMap,
- m_allWrappedHosts, m_populateResultCache, m_fragmentsPresent, m_validOnDemandResources,
- m_subtitutableMap);
+ m_mandatoryResources,
+ m_dependentMap.deepClone(),
+ m_candidateMap.deepClone(),
+ m_allWrappedHosts,
+ m_populateResultCache,
+ m_fragmentsPresent,
+ m_validOnDemandResources,
+ m_subtitutableMap,
+ m_path.deepClone());
}
public void dump(ResolveContext rc)
{
// Create set of all revisions from requirements.
- Set<Resource> resources = new HashSet<Resource>();
- for (Entry<Requirement, List<Capability>> entry
+ Set<Resource> resources = new CopyOnWriteSet<Resource>();
+ for (Entry<Requirement, CopyOnWriteList<Capability>> entry
: m_candidateMap.entrySet())
{
resources.add(entry.getKey().getResource());
@@ -1260,4 +1316,51 @@ class Candidates
}
System.out.println("=== END CANDIDATE MAP ===");
}
+
+ public void permutate(Requirement req, List<Candidates> permutations)
+ {
+ if (!Util.isMultiple(req) && canRemoveCandidate(req))
+ {
+ Candidates perm = copy();
+ perm.removeFirstCandidate(req);
+ permutations.add(perm);
+ }
+ }
+
+ public boolean canRemoveCandidate(Requirement req)
+ {
+ List<Capability> candidates = m_candidateMap.get(req);
+ return ((candidates != null) && (candidates.size() > 1 || Util.isOptional(req)));
+ }
+
+ public void permutateIfNeeded(Requirement req, List<Candidates> permutations)
+ {
+ List<Capability> candidates = m_candidateMap.get(req);
+ if ((candidates != null) && (candidates.size() > 1))
+ {
+ // Check existing permutations to make sure we haven't
+ // already permutated this requirement. This check for
+ // duplicate permutations is simplistic. It assumes if
+ // there is any permutation that contains a different
+ // initial candidate for the requirement in question,
+ // then it has already been permutated.
+ boolean permutated = false;
+ for (Candidates existingPerm : permutations)
+ {
+ List<Capability> existingPermCands = existingPerm.m_candidateMap.get(req);
+ if (existingPermCands != null && !existingPermCands.get(0).equals(candidates.get(0)))
+ {
+ permutated = true;
+ break;
+ }
+ }
+ // If we haven't already permutated the existing
+ // import, do so now.
+ if (!permutated)
+ {
+ permutate(req, permutations);
+ }
+ }
+ }
+
}
diff --git a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/ResolverImpl.java b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/ResolverImpl.java
index 21721006d..381240b94 100644
--- a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/ResolverImpl.java
+++ b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/ResolverImpl.java
@@ -24,11 +24,12 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
-import java.util.StringTokenizer;
import org.osgi.framework.namespace.BundleNamespace;
import org.osgi.framework.namespace.ExecutionEnvironmentNamespace;
@@ -68,7 +69,9 @@ public class ResolverImpl implements Resolver
// removed the offending capabilities
private Candidates m_multipleCardCandidates = null;
- private final Map<Capability, List<Capability>> m_packageSourcesCache = new HashMap();
+ private final Map<Capability, Set<Capability>> m_packageSourcesCache = new HashMap<Capability, Set<Capability>>(256);
+
+ private final Map<String, List<String>> m_usesCache = new HashMap<String, List<String>>();
ResolveSession(ResolveContext resolveContext)
{
@@ -95,7 +98,7 @@ public class ResolverImpl implements Resolver
m_multipleCardCandidates = multipleCardCandidates;
}
- Map<Capability, List<Capability>> getPackageSourcesCache()
+ Map<Capability, Set<Capability>> getPackageSourcesCache()
{
return m_packageSourcesCache;
}
@@ -104,6 +107,10 @@ public class ResolverImpl implements Resolver
{
return m_resolveContext;
}
+
+ public Map<String, List<String>> getUsesCache() {
+ return m_usesCache;
+ }
}
public ResolverImpl(Logger logger)
@@ -120,8 +127,8 @@ public class ResolverImpl implements Resolver
new HashMap<Resource, Packages>();
// Make copies of arguments in case we want to modify them.
- Collection<Resource> mandatoryResources = new ArrayList(rc.getMandatoryResources());
- Collection<Resource> optionalResources = new ArrayList(rc.getOptionalResources());
+ Collection<Resource> mandatoryResources = new ArrayList<Resource>(rc.getMandatoryResources());
+ Collection<Resource> optionalResources = new ArrayList<Resource>(rc.getOptionalResources());
// keeps track of valid on demand fragments that we have seen.
// a null value or TRUE indicate it is valid
Map<Resource, Boolean> validOnDemandResources = new HashMap<Resource, Boolean>(0);
@@ -170,7 +177,7 @@ public class ResolverImpl implements Resolver
// fragments, since they will only be pulled in if their
// host is already present.
Set<Resource> allResources =
- new HashSet<Resource>(mandatoryResources);
+ new LinkedHashSet<Resource>(mandatoryResources);
for (Resource resource : optionalResources)
{
if (allCandidates.isPopulated(resource))
@@ -190,21 +197,35 @@ public class ResolverImpl implements Resolver
// If a populated resource is a fragment, then its host
// must ultimately be verified, so store its host requirement
// to use for package space calculation.
- Map<Resource, List<Requirement>> hostReqs =
- new HashMap<Resource, List<Requirement>>();
+ Map<Resource, Requirement> hostReqs = new HashMap<Resource, Requirement>();
for (Resource resource : allResources)
{
if (Util.isFragment(resource))
{
hostReqs.put(
resource,
- resource.getRequirements(HostNamespace.HOST_NAMESPACE));
+ resource.getRequirements(HostNamespace.HOST_NAMESPACE).get(0));
}
}
+ Set<Object> donePaths = new HashSet<Object>();
Map<Resource, ResolutionException> faultyResources = null;
do
{
+ allCandidates = (usesPermutations.size() > 0)
+ ? usesPermutations.remove(0)
+ : (importPermutations.size() > 0
+ ? importPermutations.remove(0)
+ : null);
+ if (allCandidates == null)
+ {
+ break;
+ }
+ if (!donePaths.add(allCandidates.getPath()))
+ {
+ continue;
+ }
+
rethrow = null;
resourcePkgMap.clear();
@@ -214,9 +235,6 @@ public class ResolverImpl implements Resolver
// delta of the current permutation.
session.setMultipleCardCandidates(null);
- allCandidates = (usesPermutations.size() > 0)
- ? usesPermutations.remove(0)
- : importPermutations.remove(0);
//allCandidates.dump();
Map<Resource, ResolutionException> currentFaultyResources = null;
@@ -241,16 +259,24 @@ public class ResolverImpl implements Resolver
// If we are resolving a fragment, then get its
// host candidate and verify it instead.
- List<Requirement> hostReq = hostReqs.get(resource);
+ Requirement hostReq = hostReqs.get(resource);
if (hostReq != null)
{
- target = allCandidates.getCandidates(hostReq.get(0))
- .iterator().next().getResource();
+ Capability hostCap = allCandidates.getFirstCandidate(hostReq);
+ // If the resource is an already resolved fragment and can not
+ // be attached to new hosts, there will be no matching host,
+ // so ignore this resource
+ if (hostCap == null)
+ {
+ continue;
+ }
+ target = hostCap.getResource();
}
calculatePackageSpaces(
session, allCandidates.getWrappedHost(target), allCandidates,
- resourcePkgMap, new HashMap(), new HashSet());
+ resourcePkgMap, new HashMap<Capability, Set<Resource>>(256),
+ new HashSet<Resource>(64));
//System.out.println("+++ PACKAGE SPACES START +++");
//dumpResourcePkgMap(resourcePkgMap);
//System.out.println("+++ PACKAGE SPACES END +++");
@@ -296,8 +322,7 @@ public class ResolverImpl implements Resolver
}
}
}
- while ((rethrow != null)
- && ((usesPermutations.size() > 0) || (importPermutations.size() > 0)));
+ while (rethrow != null);
// If there is a resolve exception, then determine if an
// optionally resolved resource is to blame (typically a fragment).
@@ -312,7 +337,7 @@ public class ResolverImpl implements Resolver
for (Resource faultyResource : resourceKeys)
{
Boolean valid = validOnDemandResources.get(faultyResource);
- if (valid != null && valid.booleanValue())
+ if (valid != null && valid)
{
// This was an ondemand resource.
// Invalidate it and try again.
@@ -348,11 +373,18 @@ public class ResolverImpl implements Resolver
// If we are resolving a fragment, then we
// actually want to populate its host's wires.
- List<Requirement> hostReq = hostReqs.get(resource);
+ Requirement hostReq = hostReqs.get(resource);
if (hostReq != null)
{
- target = allCandidates.getCandidates(hostReq.get(0))
- .iterator().next().getResource();
+ Capability hostCap = allCandidates.getFirstCandidate(hostReq);
+ // If the resource is an already resolved fragment and can not
+ // be attached to new hosts, there will be no matching host,
+ // so ignore this resource
+ if (hostCap == null)
+ {
+ continue;
+ }
+ target = hostCap.getResource();
}
if (allCandidates.isPopulated(target))
@@ -454,7 +486,7 @@ public class ResolverImpl implements Resolver
// Record the initial candidate permutation.
usesPermutations.add(allCandidates);
- ResolutionException rethrow = null;
+ ResolutionException rethrow;
do
{
@@ -484,7 +516,8 @@ public class ResolverImpl implements Resolver
calculatePackageSpaces(session,
allCandidates.getWrappedHost(host), allCandidates,
- resourcePkgMap, new HashMap(), new HashSet());
+ resourcePkgMap, new HashMap<Capability, Set<Resource>>(256),
+ new HashSet<Resource>(64));
//System.out.println("+++ PACKAGE SPACES START +++");
//dumpResourcePkgMap(resourcePkgMap);
//System.out.println("+++ PACKAGE SPACES END +++");
@@ -493,7 +526,7 @@ public class ResolverImpl implements Resolver
{
checkDynamicPackageSpaceConsistency(session,
allCandidates.getWrappedHost(host),
- allCandidates, resourcePkgMap, new HashMap());
+ allCandidates, resourcePkgMap, new HashMap<Resource, Object>(64));
}
catch (ResolutionException ex)
{
@@ -524,7 +557,7 @@ public class ResolverImpl implements Resolver
.getDeclaredRequirement().getResource();
}
Boolean valid = onDemandResources.get(faultyResource);
- if (valid != null && valid.booleanValue())
+ if (valid != null && valid)
{
onDemandResources.put(faultyResource, Boolean.FALSE);
retry = true;
@@ -571,7 +604,7 @@ public class ResolverImpl implements Resolver
Resource resource,
Candidates allCandidates,
Map<Resource, Packages> resourcePkgMap,
- Map<Capability, List<Resource>> usesCycleMap,
+ Map<Capability, Set<Resource>> usesCycleMap,
Set<Resource> cycle)
{
if (cycle.contains(resource))
@@ -598,8 +631,8 @@ public class ResolverImpl implements Resolver
// capability or actual capability if resource is resolved or not.
// We use parallel lists so we can calculate the packages spaces for
// resolved and unresolved resources in an identical fashion.
- List<Requirement> reqs = new ArrayList();
- List<Capability> caps = new ArrayList();
+ List<Requirement> reqs = new ArrayList<Requirement>();
+ List<Capability> caps = new ArrayList<Capability>();
boolean isDynamicImporting = false;
Wiring wiring = session.getContext().getWirings().get(resource);
if (wiring != null)
@@ -732,14 +765,14 @@ public class ResolverImpl implements Resolver
mergeCandidatePackages(
session.getContext(), resource, req, cap, resourcePkgMap, allCandidates,
- new HashMap<Resource, List<Capability>>(), new HashMap<Resource, List<Resource>>());
+ new HashMap<Resource, Set<Capability>>(), new HashMap<Resource, Set<Resource>>());
}
// Third, have all candidates to calculate their package spaces.
- for (int i = 0; i < caps.size(); i++)
+ for (Capability cap : caps)
{
calculatePackageSpaces(
- session, caps.get(i).getResource(), allCandidates, resourcePkgMap,
+ session, cap.getResource(), allCandidates, resourcePkgMap,
usesCycleMap, cycle);
}
@@ -829,19 +862,19 @@ public class ResolverImpl implements Resolver
private void mergeCandidatePackages(
ResolveContext rc, Resource current, Requirement currentReq,
Capability candCap, Map<Resource, Packages> resourcePkgMap,
- Candidates allCandidates, Map<Resource, List<Capability>> cycles, HashMap<Resource, List<Resource>> visitedRequiredBundlesMap)
+ Candidates allCandidates, Map<Resource, Set<Capability>> cycles,
+ HashMap<Resource, Set<Resource>> visitedRequiredBundlesMap)
{
- List<Capability> cycleCaps = cycles.get(current);
+ Set<Capability> cycleCaps = cycles.get(current);
if (cycleCaps == null)
{
- cycleCaps = new ArrayList<Capability>();
+ cycleCaps = new HashSet<Capability>();
cycles.put(current, cycleCaps);
}
- if (cycleCaps.contains(candCap))
+ if (!cycleCaps.add(candCap))
{
return;
}
- cycleCaps.add(candCap);
if (candCap.getNamespace().equals(PackageNamespace.PACKAGE_NAMESPACE))
{
@@ -857,17 +890,15 @@ public class ResolverImpl implements Resolver
// Get the candidate's package space to determine which packages
// will be visible to the current resource.
Packages candPkgs = resourcePkgMap.get(candCap.getResource());
-
- List<Resource> visitedRequiredBundles = visitedRequiredBundlesMap.get(current);
+
+ Set<Resource> visitedRequiredBundles = visitedRequiredBundlesMap.get(current);
if (visitedRequiredBundles == null)
{
- visitedRequiredBundles = new ArrayList<Resource>();
- visitedRequiredBundlesMap.put(current, visitedRequiredBundles);
+ visitedRequiredBundles = new HashSet<Resource>();
+ visitedRequiredBundlesMap.put(current, visitedRequiredBundles);
}
- if (!visitedRequiredBundles.contains(candCap.getResource()))
+ if (visitedRequiredBundles.add(candCap.getResource()))
{
- visitedRequiredBundles.add(candCap.getResource());
-
// We have to merge all exported packages from the candidate,
// since the current resource requires it.
for (Entry<String, Blame> entry : candPkgs.m_exportedPkgs.entrySet())
@@ -919,17 +950,20 @@ public class ResolverImpl implements Resolver
req.getDirectives()
.get(BundleNamespace.REQUIREMENT_VISIBILITY_DIRECTIVE);
if ((value != null)
- && value.equals(BundleNamespace.VISIBILITY_REEXPORT)
- && (allCandidates.getCandidates(req) != null))
+ && value.equals(BundleNamespace.VISIBILITY_REEXPORT))
{
- mergeCandidatePackages(
- rc,
- current,
- currentReq,
- allCandidates.getCandidates(req).iterator().next(),
- resourcePkgMap,
- allCandidates,
- cycles, visitedRequiredBundlesMap);
+ Capability cap = allCandidates.getFirstCandidate(req);
+ if (cap != null) {
+ mergeCandidatePackages(
+ rc,
+ current,
+ currentReq,
+ cap,
+ resourcePkgMap,
+ allCandidates,
+ cycles,
+ visitedRequiredBundlesMap);
+ }
}
}
}
@@ -976,7 +1010,7 @@ public class ResolverImpl implements Resolver
Capability mergeCap, List<Requirement> blameReqs, Capability matchingCap,
Map<Resource, Packages> resourcePkgMap,
Candidates allCandidates,
- Map<Capability, List<Resource>> cycleMap)
+ Map<Capability, Set<Resource>> cycleMap)
{
// If there are no uses, then just return.
// If the candidate resource is the same as the current resource,
@@ -988,14 +1022,16 @@ public class ResolverImpl implements Resolver
}
// Check for cycles.
- List<Resource> list = cycleMap.get(mergeCap);
- if ((list != null) && list.contains(current))
+ Set<Resource> set = cycleMap.get(mergeCap);
+ if (set == null)
+ {
+ set = new HashSet<Resource>();
+ cycleMap.put(mergeCap, set);
+ }
+ if (!set.add(current))
{
return;
}
- list = (list == null) ? new ArrayList<Resource>() : list;
- list.add(current);
- cycleMap.put(mergeCap, list);
for (Capability candSourceCap : getPackageSources(session, mergeCap, resourcePkgMap))
{
@@ -1007,19 +1043,21 @@ public class ResolverImpl implements Resolver
// }
// else
{
- uses = Collections.EMPTY_LIST;
- String s = candSourceCap.getDirectives()
- .get(Namespace.CAPABILITY_USES_DIRECTIVE);
+ String s = candSourceCap.getDirectives().get(Namespace.CAPABILITY_USES_DIRECTIVE);
if (s != null)
{
// Parse these uses directive.
- StringTokenizer tok = new StringTokenizer(s, ",");
- uses = new ArrayList(tok.countTokens());
- while (tok.hasMoreTokens())
+ uses = session.getUsesCache().get(s);
+ if (uses == null)
{
- uses.add(tok.nextToken().trim());
+ uses = parseUses(s);
+ session.getUsesCache().put(s, uses);
}
}
+ else
+ {
+ uses = Collections.emptyList();
+ }
}
for (String usedPkgName : uses)
{
@@ -1029,7 +1067,7 @@ public class ResolverImpl implements Resolver
Blame candExportedBlame = candSourcePkgs.m_exportedPkgs.get(usedPkgName);
if (candExportedBlame != null)
{
- candSourceBlames = new ArrayList(1);
+ candSourceBlames = new ArrayList<Blame>(1);
candSourceBlames.add(candExportedBlame);
}
else
@@ -1050,10 +1088,10 @@ public class ResolverImpl implements Resolver
continue;
}
- List<UsedBlames> usedPkgBlames = currentPkgs.m_usedPkgs.get(usedPkgName);
+ Map<Capability, UsedBlames> usedPkgBlames = currentPkgs.m_usedPkgs.get(usedPkgName);
if (usedPkgBlames == null)
{
- usedPkgBlames = new ArrayList<UsedBlames>();
+ usedPkgBlames = new LinkedHashMap<Capability, UsedBlames>();
currentPkgs.m_usedPkgs.put(usedPkgName, usedPkgBlames);
}
for (Blame blame : candSourceBlames)
@@ -1079,28 +1117,56 @@ public class ResolverImpl implements Resolver
}
}
+ private static List<String> parseUses(String s) {
+ int nb = 1;
+ int l = s.length();
+ for (int i = 0; i < l; i++) {
+ if (s.charAt(i) == ',') {
+ nb++;
+ }
+ }
+ List<String> uses = new ArrayList<String>(nb);
+ int start = 0;
+ while (true) {
+ while (start < l) {
+ char c = s.charAt(start);
+ if (c != ' ' && c != ',') {
+ break;
+ }
+ start++;
+ }
+ int end = start + 1;
+ while (end < l) {
+ char c = s.charAt(end);
+ if (c == ' ' || c == ',') {
+ break;
+ }
+ end++;
+ }
+ if (start < l) {
+ uses.add(s.substring(start, end));
+ start = end + 1;
+ } else {
+ break;
+ }
+ }
+ return uses;
+ }
+
private static void addUsedBlame(
- List<UsedBlames> usedBlames, Capability usedCap,
+ Map<Capability, UsedBlames> usedBlames, Capability usedCap,
List<Requirement> blameReqs, Capability matchingCap)
{
// Create a new Blame based off the used capability and the
// blame chain requirements.
Blame newBlame = new Blame(usedCap, blameReqs);
// Find UsedBlame that uses the same capablity as the new blame.
- UsedBlames addToBlame = null;
- for (UsedBlames usedBlame : usedBlames)
- {
- if (usedCap.equals(usedBlame.m_cap))
- {
- addToBlame = usedBlame;
- break;
- }
- }
+ UsedBlames addToBlame = usedBlames.get(usedCap);
if (addToBlame == null)
{
// If none exist create a new UsedBlame for the capability.
addToBlame = new UsedBlames(usedCap);
- usedBlames.add(addToBlame);
+ usedBlames.put(usedCap, addToBlame);
}
// Add the new Blame and record the matching capability cause
// in case the root requirement has multiple cardinality.
@@ -1161,9 +1227,9 @@ public class ResolverImpl implements Resolver
else if (!sourceBlame.m_cap.getResource().equals(blame.m_cap.getResource()))
{
// Try to permutate the conflicting requirement.
- permutate(allCandidates, blame.m_reqs.get(0), importPermutations);
+ allCandidates.permutate(blame.m_reqs.get(0), importPermutations);
// Try to permutate the source requirement.
- permutate(allCandidates, sourceBlame.m_reqs.get(0), importPermutations);
+ allCandidates.permutate(sourceBlame.m_reqs.get(0), importPermutations);
// Report conflict.
ResolutionException ex = new ResolutionException(
"Uses constraint violation. Unable to resolve resource "
@@ -1203,7 +1269,7 @@ public class ResolverImpl implements Resolver
{
continue;
}
- for (UsedBlames usedBlames : pkgs.m_usedPkgs.get(pkgName))
+ for (UsedBlames usedBlames : pkgs.m_usedPkgs.get(pkgName).values())
{
if (!isCompatible(session, Collections.singletonList(exportBlame), usedBlames.m_cap, resourcePkgMap))
{
@@ -1259,17 +1325,9 @@ public class ResolverImpl implements Resolver
// See if we can permutate the candidates for blamed
// requirement; there may be no candidates if the resource
// associated with the requirement is already resolved.
- List<Capability> candidates = permutation.getCandidates(req);
- if ((candidates != null) && (candidates.size() > 1 || Util.isOptional(req)))
- {
+ if (permutation.canRemoveCandidate(req)) {
+ permutation.removeFirstCandidate(req);
mutated.add(req);
- // Remove the conflicting candidate.
- candidates.remove(0);
- if (candidates.isEmpty())
- {
- permutation.clearCandidates(req);
- }
- // Continue with the next uses constraint.
break;
}
}
@@ -1297,7 +1355,8 @@ public class ResolverImpl implements Resolver
// Imported packages are added after required packages because they shadow or override
// the packages from required bundles.
Map<String, List<Blame>> allImportRequirePkgs =
- new HashMap<String, List<Blame>>(pkgs.m_requiredPkgs);
+ new LinkedHashMap<String, List<Blame>>(pkgs.m_requiredPkgs.size() + pkgs.m_importedPkgs.size());
+ allImportRequirePkgs.putAll(pkgs.m_requiredPkgs);
allImportRequirePkgs.putAll(pkgs.m_importedPkgs);
for (Entry<String, List<Blame>> requirementBlames : allImportRequirePkgs.entrySet())
@@ -1308,7 +1367,7 @@ public class ResolverImpl implements Resolver
continue;
}
- for (UsedBlames usedBlames : pkgs.m_usedPkgs.get(pkgName))
+ for (UsedBlames usedBlames : pkgs.m_usedPkgs.get(pkgName).values())
{
if (!isCompatible(session, requirementBlames.getValue(), usedBlames.m_cap, resourcePkgMap))
{
@@ -1372,17 +1431,9 @@ public class ResolverImpl implements Resolver
// See if we can permutate the candidates for blamed
// requirement; there may be no candidates if the resource
// associated with the requirement is already resolved.
- List<Capability> candidates = permutation.getCandidates(req);
- if ((candidates != null) && (candidates.size() > 1 || Util.isOptional(req)))
- {
+ if (permutation.canRemoveCandidate(req)) {
+ permutation.removeFirstCandidate(req);
mutated.add(req);
- // Remove the conflicting candidate.
- candidates.remove(0);
- if (candidates.isEmpty())
- {
- permutation.clearCandidates(req);
- }
- // Continue with the next uses constraint.
break;
}
}
@@ -1415,7 +1466,7 @@ public class ResolverImpl implements Resolver
// with existing import decisions, we may end up trying
// to permutate the same import a lot of times, so we should
// try to check if that the case and only permutate it once.
- permutateIfNeeded(allCandidates, req, importPermutations);
+ allCandidates.permutateIfNeeded(req, importPermutations);
}
}
@@ -1438,10 +1489,9 @@ public class ResolverImpl implements Resolver
int permCount = usesPermutations.size() + importPermutations.size();
for (Requirement req : resource.getRequirements(null))
{
- List<Capability> cands = allCandidates.getCandidates(req);
- if (cands != null && !cands.isEmpty())
+ Capability cap = allCandidates.getFirstCandidate(req);
+ if (cap != null)
{
- Capability cap = cands.get(0);
if (!resource.equals(cap.getResource()))
{
try
@@ -1458,7 +1508,7 @@ public class ResolverImpl implements Resolver
// to backtrack on our current candidate selection.
if (permCount == (usesPermutations.size() + importPermutations.size()))
{
- permutate(allCandidates, req, importPermutations);
+ allCandidates.permutate(req, importPermutations);
}
throw ex;
}
@@ -1487,64 +1537,13 @@ public class ResolverImpl implements Resolver
}
// Get the current candidate list and remove all the offending root
// cause candidates from a copy of the current permutation.
- candidates = session.getMultipleCardCandidates().getCandidates(req);
- candidates.removeAll(usedBlames.getRootCauses(req));
+ candidates = session.getMultipleCardCandidates().clearCandidates(req, usedBlames.getRootCauses(req));
}
// We only are successful if there is at least one candidate left
// for the requirement
return (candidates != null) && !candidates.isEmpty();
}
- private static void permutate(
- Candidates allCandidates, Requirement req, List<Candidates> permutations)
- {
- if (!Util.isMultiple(req))
- {
- List<Capability> candidates = allCandidates.getCandidates(req);
- if ((candidates != null) && (candidates.size() > 1 || Util.isOptional(req)))
- {
- Candidates perm = allCandidates.copy();
- candidates = perm.getCandidates(req);
- candidates.remove(0);
- if (candidates.isEmpty())
- {
- perm.clearCandidates(req);
- }
- permutations.add(perm);
- }
- }
- }
-
- static void permutateIfNeeded(
- Candidates allCandidates, Requirement req, List<Candidates> permutations)
- {
- List<Capability> candidates = allCandidates.getCandidates(req);
- if ((candidates != null) && (candidates.size() > 1))
- {
- // Check existing permutations to make sure we haven't
- // already permutated this requirement. This check for
- // duplicate permutations is simplistic. It assumes if
- // there is any permutation that contains a different
- // initial candidate for the requirement in question,
- // then it has already been permutated.
- boolean permutated = false;
- for (Candidates existingPerm : permutations)
- {
- List<Capability> existingPermCands = existingPerm.getCandidates(req);
- if (existingPermCands != null && !existingPermCands.get(0).equals(candidates.get(0)))
- {
- permutated = true;
- }
- }
- // If we haven't already permutated the existing
- // import, do so now.
- if (!permutated)
- {
- permutate(allCandidates, req, permutations);
- }
- }
- }
-
private static void calculateExportedPackages(
ResolveContext rc,
Resource resource,
@@ -1590,11 +1589,10 @@ public class ResolverImpl implements Resolver
{
if (req.getNamespace().equals(PackageNamespace.PACKAGE_NAMESPACE))
{
- List<Capability> cands = allCandidates.getCandidates(req);
- if ((cands != null) && !cands.isEmpty())
+ Capability cand = allCandidates.getFirstCandidate(req);
+ if (cand != null)
{
- String pkgName = (String) cands.get(0)
- .getAttributes().get(PackageNamespace.PACKAGE_NAMESPACE);
+ String pkgName = (String) cand.getAttributes().get(PackageNamespace.PACKAGE_NAMESPACE);
exports.remove(pkgName);
}
}
@@ -1618,7 +1616,7 @@ public class ResolverImpl implements Resolver
{
if ((!currentBlames.isEmpty()) && (candCap != null))
{
- List<Capability> currentSources;
+ Set<Capability> currentSources;
// quick check for single source package
if (currentBlames.size() == 1)
{
@@ -1635,25 +1633,22 @@ public class ResolverImpl implements Resolver
}
else
{
- currentSources = new ArrayList<Capability>(currentBlames.size());
+ currentSources = new HashSet<Capability>(currentBlames.size());
for (Blame currentBlame : currentBlames)
{
- List<Capability> blameSources =
+ Set<Capability> blameSources =
getPackageSources(
session,
currentBlame.m_cap,
resourcePkgMap);
for (Capability blameSource : blameSources)
{
- if (!currentSources.contains(blameSource))
- {
- currentSources.add(blameSource);
- }
+ currentSources.add(blameSource);
}
}
}
- List<Capability> candSources =
+ Set<Capability> candSources =
getPackageSources(
session,
candCap,
@@ -1665,18 +1660,19 @@ public class ResolverImpl implements Resolver
return true;
}
- private List<Capability> getPackageSources(
+ private Set<Capability> getPackageSources(
ResolveSession session, Capability cap, Map<Resource, Packages> resourcePkgMap)
{
- Map<Capability, List<Capability>> packageSourcesCache = session.getPackageSourcesCache();
+ Map<Capability, Set<Capability>> packageSourcesCache = session.getPackageSourcesCache();
// If it is a package, then calculate sources for it.
if (cap.getNamespace().equals(PackageNamespace.PACKAGE_NAMESPACE))
{
- List<Capability> sources = packageSourcesCache.get(cap);
+ Set<Capability> sources = packageSourcesCache.get(cap);
if (sources == null)
{
sources = getPackageSourcesInternal(
- session.getContext(), cap, resourcePkgMap, new ArrayList(), new HashSet());
+ session.getContext(), cap, resourcePkgMap,
+ new HashSet<Capability>(64), new HashSet<Capability>(64));
packageSourcesCache.put(cap, sources);
}
return sources;
@@ -1688,23 +1684,22 @@ public class ResolverImpl implements Resolver
String uses = cap.getDirectives().get(Namespace.CAPABILITY_USES_DIRECTIVE);
if ((uses != null) && (uses.length() > 0))
{
- return Collections.singletonList(cap);
+ return Collections.singleton(cap);
}
- return Collections.EMPTY_LIST;
+ return Collections.emptySet();
}
- private static List<Capability> getPackageSourcesInternal(
+ private static Set<Capability> getPackageSourcesInternal(
ResolveContext rc, Capability cap, Map<Resource, Packages> resourcePkgMap,
- List<Capability> sources, Set<Capability> cycleMap)
+ Set<Capability> sources, Set<Capability> cycleMap)
{
if (cap.getNamespace().equals(PackageNamespace.PACKAGE_NAMESPACE))
{
- if (cycleMap.contains(cap))
+ if (!cycleMap.add(cap))
{
return sources;
}
- cycleMap.add(cap);
// Get the package name associated with the capability.
String pkgName = cap.getAttributes()
@@ -1727,10 +1722,7 @@ public class ResolverImpl implements Resolver
{
sourceCap = new WrappedCapability(cap.getResource(), sourceCap);
}
- if (!sources.contains(sourceCap))
- {
- sources.add(sourceCap);
- }
+ sources.add(sourceCap);
}
}
@@ -1784,7 +1776,7 @@ public class ResolverImpl implements Resolver
if (!rc.getWirings().containsKey(unwrappedResource)
&& !wireMap.containsKey(unwrappedResource))
{
- wireMap.put(unwrappedResource, (List<Wire>) Collections.EMPTY_LIST);
+ wireMap.put(unwrappedResource, Collections.<Wire>emptyList());
List<Wire> packageWires = new ArrayList<Wire>();
List<Wire> bundleWires = new ArrayList<Wire>();
@@ -1815,9 +1807,9 @@ public class ResolverImpl implements Resolver
if (IdentityNamespace.IDENTITY_NAMESPACE.equals(cand.getNamespace())
&& Util.isFragment(targetCand))
{
- targetCand = allCandidates.getCandidates(
- targetCand.getRequirements(HostNamespace.HOST_NAMESPACE).get(0))
- .iterator().next().getResource();
+ targetCand = allCandidates.getFirstCandidate(
+ targetCand.getRequirements(HostNamespace.HOST_NAMESPACE).get(0))
+ .getResource();
targetCand = allCandidates.getWrappedHost(targetCand);
}
@@ -1919,12 +1911,10 @@ public class ResolverImpl implements Resolver
private static Wire createWire(Requirement requirement, Candidates allCandidates)
{
- List<Capability> candidates = allCandidates.getCandidates(requirement);
- if (candidates == null || candidates.isEmpty())
- {
+ Capability cand = allCandidates.getFirstCandidate(requirement);
+ if (cand == null) {
return null;
}
- Capability cand = candidates.get(0);
return new WireImpl(
getDeclaredResource(requirement.getResource()),
getDeclaredRequirement(requirement),
@@ -1952,14 +1942,13 @@ public class ResolverImpl implements Resolver
Map<Resource, Packages> resourcePkgMap,
Map<Resource, List<Wire>> wireMap, Candidates allCandidates)
{
- wireMap.put(resource, (List<Wire>) Collections.EMPTY_LIST);
+ wireMap.put(resource, Collections.<Wire>emptyList());
List<Wire> packageWires = new ArrayList<Wire>();
// Get the candidates for the current dynamic requirement.
- List<Capability> candCaps = allCandidates.getCandidates(dynReq);
// Record the dynamic candidate.
- Capability dynCand = candCaps.get(0);
+ Capability dynCand = allCandidates.getFirstCandidate(dynReq);
if (!rc.getWirings().containsKey(dynCand.getResource()))
{
@@ -2011,16 +2000,16 @@ public class ResolverImpl implements Resolver
System.out.println(" " + entry.getKey() + " - " + entry.getValue());
}
System.out.println(" USED");
- for (Entry<String, List<UsedBlames>> entry : packages.m_usedPkgs.entrySet())
+ for (Entry<String, Map<Capability, UsedBlames>> entry : packages.m_usedPkgs.entrySet())
{
- System.out.println(" " + entry.getKey() + " - " + entry.getValue());
+ System.out.println(" " + entry.getKey() + " - " + entry.getValue().values());
}
}
private static String toStringBlame(
ResolveContext rc, Candidates allCandidates, Blame blame)
{
- StringBuffer sb = new StringBuffer();
+ StringBuilder sb = new StringBuilder();
if ((blame.m_reqs != null) && !blame.m_reqs.isEmpty())
{
for (int i = 0; i < blame.m_reqs.size(); i++)
@@ -2128,18 +2117,12 @@ public class ResolverImpl implements Resolver
private static Capability getSatisfyingCapability(
ResolveContext rc, Candidates allCandidates, Requirement req)
{
- Capability cap = null;
-
// If the requiring revision is not resolved, then check in the
// candidate map for its matching candidate.
- List<Capability> cands = allCandidates.getCandidates(req);
- if (cands != null)
- {
- cap = cands.get(0);
- }
+ Capability cap = allCandidates.getFirstCandidate(req);
// Otherwise, if the requiring revision is resolved then check
// in its wires for the capability satisfying the requirement.
- else if (rc.getWirings().containsKey(req.getResource()))
+ if (cap == null && rc.getWirings().containsKey(req.getResource()))
{
List<Wire> wires =
rc.getWirings().get(req.getResource()).getRequiredResourceWires(null);
@@ -2165,10 +2148,10 @@ public class ResolverImpl implements Resolver
private static class Packages
{
private final Resource m_resource;
- public final Map<String, Blame> m_exportedPkgs = new HashMap();
- public final Map<String, List<Blame>> m_importedPkgs = new HashMap();
- public final Map<String, List<Blame>> m_requiredPkgs = new HashMap();
- public final Map<String, List<UsedBlames>> m_usedPkgs = new HashMap();
+ public final Map<String, Blame> m_exportedPkgs = new LinkedHashMap<String, Blame>(32);
+ public final Map<String, List<Blame>> m_importedPkgs = new LinkedHashMap<String, List<Blame>>(32);
+ public final Map<String, List<Blame>> m_requiredPkgs = new LinkedHashMap<String, List<Blame>>(32);
+ public final Map<String, Map<Capability, UsedBlames>> m_usedPkgs = new LinkedHashMap<String, Map<Capability, UsedBlames>>(32);
public boolean m_isCalculated = false;
public Packages(Resource resource)
@@ -2269,10 +2252,10 @@ public class ResolverImpl implements Resolver
{
if (m_rootCauses == null)
{
- return Collections.EMPTY_SET;
+ return Collections.emptySet();
}
Set<Capability> result = m_rootCauses.get(req);
- return result == null ? Collections.EMPTY_SET : result;
+ return result == null ? Collections.<Capability>emptySet() : result;
}
@Override
diff --git a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/ShadowList.java b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/ShadowList.java
deleted file mode 100644
index 05734da46..000000000
--- a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/ShadowList.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.felix.resolver;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.ListIterator;
-
-public class ShadowList<T> implements List<T>
-{
- private final List<T> m_original;
- private final List<T> m_shadow;
-
- public ShadowList(List<T> original)
- {
- m_original = original;
- m_shadow = new ArrayList<T>(original);
- }
-
- public List<T> getOriginal()
- {
- return m_original;
- }
-
- public int size()
- {
- return m_shadow.size();
- }
-
- public boolean isEmpty()
- {
- return m_shadow.isEmpty();
- }
-
- public boolean contains(Object o)
- {
- return m_shadow.contains(o);
- }
-
- public Iterator<T> iterator()
- {
- return m_shadow.iterator();
- }
-
- public Object[] toArray()
- {
- return m_shadow.toArray();
- }
-
- public <T> T[] toArray(T[] ts)
- {
- return m_shadow.toArray(ts);
- }
-
- public boolean add(T e)
- {
- return m_shadow.add(e);
- }
-
- public boolean remove(Object o)
- {
- return m_shadow.remove(o);
- }
-
- public boolean containsAll(Collection<?> clctn)
- {
- return m_shadow.containsAll(clctn);
- }
-
- public boolean addAll(Collection<? extends T> clctn)
- {
- return m_shadow.addAll(clctn);
- }
-
- public boolean addAll(int i, Collection<? extends T> clctn)
- {
- return m_shadow.addAll(i, clctn);
- }
-
- public boolean removeAll(Collection<?> clctn)
- {
- return m_shadow.removeAll(clctn);
- }
-
- public boolean retainAll(Collection<?> clctn)
- {
- return m_shadow.retainAll(clctn);
- }
-
- public void clear()
- {
- m_shadow.clear();
- }
-
- public T get(int i)
- {
- return m_shadow.get(i);
- }
-
- public T set(int i, T e)
- {
- return m_shadow.set(i, e);
- }
-
- public void add(int i, T e)
- {
- m_shadow.add(i, e);
- }
-
- public T remove(int i)
- {
- return m_shadow.remove(i);
- }
-
- public int indexOf(Object o)
- {
- return m_shadow.indexOf(o);
- }
-
- public int lastIndexOf(Object o)
- {
- return m_shadow.lastIndexOf(o);
- }
-
- public ListIterator<T> listIterator()
- {
- return m_shadow.listIterator();
- }
-
- public ListIterator<T> listIterator(int i)
- {
- return m_shadow.listIterator(i);
- }
-
- public List<T> subList(int i, int i1)
- {
- return m_shadow.subList(i, i1);
- }
-} \ No newline at end of file
diff --git a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/CopyOnWriteList.java b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/CopyOnWriteList.java
new file mode 100644
index 000000000..b22ab30e5
--- /dev/null
+++ b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/CopyOnWriteList.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.felix.resolver.util;
+
+import java.util.AbstractList;
+import java.util.Arrays;
+import java.util.Collection;
+
+public class CopyOnWriteList<T> extends AbstractList<T> {
+
+ Object[] data;
+
+ public CopyOnWriteList() {
+ data = new Object[0];
+ }
+
+ public CopyOnWriteList(Collection<T> col) {
+ if (col instanceof CopyOnWriteList) {
+ data = ((CopyOnWriteList) col).data;
+ } else {
+ data = col.toArray(new Object[col.size()]);
+ }
+ }
+
+ @Override
+ public int size() {
+ return data.length;
+ }
+
+ public T get(int index) {
+ return (T) data[index];
+ }
+
+ @Override
+ public T set(int index, T element) {
+ data = Arrays.copyOf(data, data.length);
+ T prev = (T) data[index];
+ data[index] = element;
+ return prev;
+ }
+
+ @Override
+ public void add(int index, T element) {
+ Object[] elements = data;
+ int len = elements.length;
+ Object[] newElements = new Object[len + 1];
+ int numMoved = len - index;
+ if (index > 0) {
+ System.arraycopy(elements, 0, newElements, 0, index);
+ }
+ if (numMoved > 0) {
+ System.arraycopy(elements, index, newElements, index + 1, numMoved);
+ }
+ newElements[index] = element;
+ data = newElements;
+ }
+
+ public T remove(int index) {
+ Object[] elements = data;
+ int len = elements.length;
+ T oldValue = (T) elements[index];
+ Object[] newElements = new Object[len - 1];
+ int numMoved = len - index - 1;
+ if (index > 0) {
+ System.arraycopy(elements, 0, newElements, 0, index);
+ }
+ if (numMoved > 0) {
+ System.arraycopy(elements, index + 1, newElements, index, numMoved);
+ }
+ data = newElements;
+ return oldValue;
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(data);
+ }
+}
diff --git a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/CopyOnWriteSet.java b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/CopyOnWriteSet.java
new file mode 100644
index 000000000..239233bf7
--- /dev/null
+++ b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/CopyOnWriteSet.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.felix.resolver.util;
+
+import java.util.AbstractSet;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+
+public class CopyOnWriteSet<E> extends AbstractSet<E> {
+
+ Object[] data;
+
+ public CopyOnWriteSet() {
+ data = new Object[0];
+ }
+
+ public CopyOnWriteSet(Collection<E> col) {
+ if (col instanceof CopyOnWriteSet) {
+ data = ((CopyOnWriteSet) col).data;
+ } else {
+ data = col.toArray(new Object[col.size()]);
+ }
+ }
+
+ @Override
+ public Iterator<E> iterator() {
+ return new Iterator<E>() {
+ int idx = 0;
+ public boolean hasNext() {
+ return idx < data.length;
+ }
+ @SuppressWarnings("unchecked")
+ public E next() {
+ return (E) data[idx++];
+ }
+ public void remove() {
+ CopyOnWriteSet.this.remove(--idx);
+ }
+ };
+ }
+
+ @Override
+ public int size() {
+ return data.length;
+ }
+
+ @Override
+ public boolean add(E e) {
+ Object[] d = data;
+ if (d.length == 0) {
+ data = new Object[] {e};
+ } else {
+ for (Object o : d) {
+ if (o == null ? e == null : o.equals(e)) {
+ return false;
+ }
+ }
+ Object[] a = new Object[d.length + 1];
+ System.arraycopy(d, 0, a, 0, d.length);
+ a[d.length] = e;
+ data = a;
+ }
+ return true;
+ }
+
+ private void remove(int index) {
+ Object[] d = data;
+ int len = d.length;
+ Object[] a = new Object[len - 1];
+ int numMoved = len - index - 1;
+ if (index > 0) {
+ System.arraycopy(d, 0, a, 0, index);
+ }
+ if (numMoved > 0) {
+ System.arraycopy(d, index + 1, a, index, numMoved);
+ }
+ data = a;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof CopyOnWriteSet && ((CopyOnWriteSet) o).data == data) {
+ return true;
+ }
+ return super.equals(o);
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(data);
+ }
+
+}
diff --git a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMap.java b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMap.java
new file mode 100644
index 000000000..8920a6529
--- /dev/null
+++ b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMap.java
@@ -0,0 +1,621 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.felix.resolver.util;
+
+import java.util.AbstractMap;
+import java.util.AbstractSet;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
+/**
+ * An open addressing map.
+ * Low memory consumption compared to a HashMap.
+ *
+ * Based on the mahout collection classes.
+ */
+/**
+ * An open addressing map.
+ * Low memory consumption compared to a HashMap.
+ *
+ * Based on the mahout collection classes.
+ *
+ * Collisions are resolved by double hashing.  Deleted slots are marked with
+ * the REMOVED sentinel rather than compacted; the table is rehashed when the
+ * size crosses the precomputed low/high water marks or runs out of FREE slots.
+ */
+public class OpenHashMap<K, V> extends AbstractMap<K, V> implements Cloneable {
+
+    // Precomputed table capacities.  Every capacity is prime so that the
+    // double-hashing probe step (hash % (length - 2), see indexOfKey) is
+    // coprime with the table length and the probe sequence visits every slot.
+    static final int[] primeCapacities = {
+        3, 5, 7, 11, 17, 23, 31, 37, 43, 47, 67, 79, 89, 97, 137, 163, 179, 197, 277, 311,
+        331, 359, 379, 397, 433, 557, 599, 631, 673, 719, 761, 797, 877, 953, 1039, 1117,
+        1201, 1277, 1361, 1439, 1523, 1597, 1759, 1907, 2081, 2237, 2411, 2557, 2729, 2879,
+        3049, 3203, 3527, 3821, 4177, 4481, 4831, 5119, 5471, 5779, 6101, 6421, 7057, 7643,
+        8363, 8963, 9677, 10243, 10949, 11579, 12203, 12853, 14143, 15287, 16729, 17929,
+        19373, 20507, 21911, 23159, 24407, 25717, 28289, 30577, 33461, 35863, 38747, 41017,
+        43853, 46327, 48817, 51437, 56591, 61169, 66923, 71741, 77509, 82037, 87719, 92657,
+        97649, 102877, 113189, 122347, 133853, 143483, 155027, 164089, 175447, 185323,
+        195311, 205759, 226379, 244703, 267713, 286973, 310081, 328213, 350899, 370661,
+        390647, 411527, 452759, 489407, 535481, 573953, 620171, 656429, 701819, 741337,
+        781301, 823117, 905551, 978821, 1070981, 1147921, 1240361, 1312867, 1403641, 1482707,
+        1562611, 1646237, 1811107, 1957651, 2141977, 2295859, 2480729, 2625761, 2807303,
+        2965421, 3125257, 3292489, 3622219, 3915341, 4283963, 4591721, 4961459, 5251529,
+        5614657, 5930887, 6250537, 6584983, 7244441, 7830701, 8567929, 9183457, 9922933,
+        10503061, 11229331, 11861791, 12501169, 13169977, 14488931, 15661423, 17135863,
+        18366923, 19845871, 21006137, 22458671, 23723597, 25002389, 26339969, 28977863,
+        31322867, 34271747, 36733847, 39691759, 42012281, 44917381, 47447201, 50004791,
+        52679969, 57955739, 62645741, 68543509, 73467739, 79383533, 84024581, 89834777,
+        94894427, 100009607, 105359939, 115911563, 125291483, 137087021, 146935499,
+        158767069, 168049163, 179669557, 189788857, 200019221, 210719881, 231823147,
+        250582987, 274174111, 293871013, 317534141, 336098327, 359339171, 379577741,
+        400038451, 421439783, 463646329, 501165979, 548348231, 587742049, 635068283,
+        672196673, 718678369, 759155483, 800076929, 842879579, 927292699, 1002331963,
+        1096696463, 1175484103, 1270136683, 1344393353, 1437356741, 1518310967,
+        1600153859, 1685759167, 1854585413, 2004663929, 2147483647
+    };
+    static final int largestPrime = primeCapacities[primeCapacities.length - 1];
+
+    protected static final int defaultCapacity = 277;
+    protected static final double defaultMinLoadFactor = 0.2;
+    protected static final double defaultMaxLoadFactor = 0.5;
+
+    // Slot states: a null key slot is FREE; a deleted slot holds the REMOVED
+    // sentinel so probe chains are not broken by deletions.
+    protected static final Object FREE = null;
+    protected static final Object REMOVED = new Object();
+
+    /** The number of distinct associations in the map; its "size()". */
+    protected int distinct;
+
+    /**
+     * The table capacity c=table.length always satisfies the invariant <tt>c * minLoadFactor <= s <= c *
+     * maxLoadFactor</tt>, where s=size() is the number of associations currently contained. The term "c * minLoadFactor"
+     * is called the "lowWaterMark", "c * maxLoadFactor" is called the "highWaterMark". In other words, the table capacity
+     * (and proportionally the memory used by this class) oscillates within these constraints. The terms are precomputed
+     * and cached to avoid recalculating them each time put(..) or removeKey(...) is called.
+     */
+    protected int lowWaterMark;
+    protected int highWaterMark;
+
+    /** The minimum load factor for the hashtable. */
+    protected double minLoadFactor;
+
+    /** The maximum load factor for the hashtable. */
+    protected double maxLoadFactor;
+
+    /** The hash table keys. */
+    protected Object[] table;
+
+    /** The hash table values (parallel to {@link #table}). */
+    protected Object[] values;
+
+    /** The number of table entries in state==FREE. */
+    protected int freeEntries;
+
+
+    /** Constructs an empty map with default capacity and default load factors. */
+    public OpenHashMap() {
+        this(defaultCapacity);
+    }
+
+    /**
+     * Constructs an empty map with the specified initial capacity and default load factors.
+     *
+     * @param initialCapacity the initial capacity of the map.
+     * @throws IllegalArgumentException if the initial capacity is less than zero.
+     */
+    public OpenHashMap(int initialCapacity) {
+        this(initialCapacity, defaultMinLoadFactor, defaultMaxLoadFactor);
+    }
+
+    /**
+     * Constructs an empty map with the specified initial capacity and the specified minimum and maximum load factor.
+     *
+     * @param initialCapacity the initial capacity.
+     * @param minLoadFactor the minimum load factor.
+     * @param maxLoadFactor the maximum load factor.
+     * @throws IllegalArgumentException if <tt>initialCapacity < 0 || (minLoadFactor < 0.0 || minLoadFactor >= 1.0) ||
+     *                                  (maxLoadFactor <= 0.0 || maxLoadFactor >= 1.0) || (minLoadFactor >=
+     *                                  maxLoadFactor)</tt>.
+     */
+    public OpenHashMap(int initialCapacity, double minLoadFactor, double maxLoadFactor) {
+        setUp(initialCapacity, minLoadFactor, maxLoadFactor);
+    }
+
+    /** Removes all (key,value) associations from the receiver. Implicitly calls <tt>trimToSize()</tt>. */
+    @Override
+    public void clear() {
+        Arrays.fill(this.table, FREE);
+        Arrays.fill(this.values, null);
+        distinct = 0;
+        freeEntries = table.length; // delta
+        trimToSize();
+    }
+
+    /**
+     * Returns a copy of the receiver.  Note: only the key/value arrays are
+     * cloned — the key and value objects themselves are shared with the
+     * original, so this is a shallow copy of the entries.
+     *
+     * @return a copy of the receiver.
+     */
+    @Override
+    @SuppressWarnings("unchecked")
+    public OpenHashMap<K, V> clone() {
+        try {
+            OpenHashMap<K,V> copy = (OpenHashMap<K,V>) super.clone();
+            copy.table = copy.table.clone();
+            copy.values = copy.values.clone();
+            return copy;
+        } catch (CloneNotSupportedException exc) {
+            // InternalError has no (String, Throwable) constructor pre-Java 8,
+            // hence the explicit initCause.
+            InternalError e = new InternalError();
+            e.initCause(exc);
+            throw e; //should never happen since we are cloneable
+        }
+    }
+
+    /**
+     * Returns <tt>true</tt> if the receiver contains the specified key.
+     *
+     * @return <tt>true</tt> if the receiver contains the specified key.
+     */
+    @SuppressWarnings("unchecked")
+    @Override
+    public boolean containsKey(Object key) {
+        return indexOfKey((K) key) >= 0;
+    }
+
+    /**
+     * Returns <tt>true</tt> if the receiver contains the specified value.
+     * Linear scan over the whole table.
+     *
+     * @return <tt>true</tt> if the receiver contains the specified value.
+     */
+    @SuppressWarnings("unchecked")
+    @Override
+    public boolean containsValue(Object value) {
+        return indexOfValue((V)value) >= 0;
+    }
+
+    /**
+     * Ensures that the receiver can hold at least the specified number of associations without needing to allocate new
+     * internal memory. If necessary, allocates new internal memory and increases the capacity of the receiver. <p> This
+     * method never need be called; it is for performance tuning only. Calling this method before <tt>put()</tt>ing a
+     * large number of associations boosts performance, because the receiver will grow only once instead of potentially
+     * many times and hash collisions get less probable.
+     *
+     * @param minCapacity the desired minimum capacity.
+     */
+    public void ensureCapacity(int minCapacity) {
+        if (table.length < minCapacity) {
+            int newCapacity = nextPrime(minCapacity);
+            rehash(newCapacity);
+        }
+    }
+
+    /**
+     * Returns the value associated with the specified key. It is often a good idea to first check with {@link
+     * #containsKey(Object)} whether the given key has a value associated or not, i.e. whether there exists an association
+     * for the given key or not.
+     *
+     * @param key the key to be searched for.
+     * @return the value associated with the specified key; <tt>null</tt> if no such key is present.
+     */
+    @SuppressWarnings("unchecked")
+    @Override
+    public V get(Object key) {
+        int i = indexOfKey((K)key);
+        if (i < 0) {
+            return null;
+        } //not contained
+        return (V)values[i];
+    }
+
+    /**
+     * @param key the key to be added to the receiver.
+     * @return the index where the key would need to be inserted, if it is not already contained. Returns -index-1 if the
+     *         key is already contained at slot index. Therefore, if the returned index < 0, then it is already contained
+     *         at slot -index-1. If the returned index >= 0, then it is NOT already contained and should be inserted at
+     *         slot index.
+     */
+    protected int indexOfInsertion(K key) {
+        Object[] tab = table;
+        int length = tab.length;
+
+        int hash = key.hashCode() & 0x7FFFFFFF;
+        int i = hash % length;
+        int decrement = hash % (length - 2); // double hashing, see http://www.eece.unm.edu/faculty/heileman/hash/node4.html
+        //int decrement = (hash / length) % length;
+        if (decrement == 0) {
+            decrement = 1;
+        }
+
+        // stop if we find a removed or free slot, or if we find the key itself
+        // do NOT skip over removed slots (yes, open addressing is like that...)
+        while (table[i] != FREE && table[i] != REMOVED && !equalsMindTheNull(key, tab[i])) {
+            i -= decrement;
+            //hashCollisions++;
+            if (i < 0) {
+                i += length;
+            }
+        }
+
+        if (table[i] == REMOVED) {
+            // stop if we find a free slot, or if we find the key itself.
+            // do skip over removed slots (yes, open addressing is like that...)
+            // assertion: there is at least one FREE slot.
+            // NOTE(review): this scan compares keys by reference (tab[i] != key)
+            // whereas the scan above uses equalsMindTheNull.  An equal-but-not-
+            // identical key sitting beyond a REMOVED slot would be passed over
+            // here, letting put() insert a duplicate entry at the REMOVED slot.
+            // Inherited from the upstream mahout code — confirm before changing.
+            int j = i;
+            while (table[i] != FREE && (table[i] == REMOVED || tab[i] != key)) {
+                i -= decrement;
+                //hashCollisions++;
+                if (i < 0) {
+                    i += length;
+                }
+            }
+            if (table[i] == FREE) {
+                // Key not found past the tombstones: reuse the first REMOVED
+                // slot encountered to keep probe chains short.
+                i = j;
+            }
+        }
+
+
+        if (table[i] != FREE && table[i] != REMOVED) {
+            // key already contained at slot i.
+            // return a negative number identifying the slot.
+            return -i - 1;
+        }
+        // not already contained, should be inserted at slot i.
+        // return a number >= 0 identifying the slot.
+        return i;
+    }
+
+    /**
+     * @param key the key to be searched in the receiver.
+     * @return the index where the key is contained in the receiver, returns -1 if the key was not found.
+     */
+    protected int indexOfKey(K key) {
+        Object[] tab = table;
+        int length = tab.length;
+
+        int hash = key.hashCode() & 0x7FFFFFFF;
+        int i = hash % length;
+        int decrement = hash % (length - 2); // double hashing, see http://www.eece.unm.edu/faculty/heileman/hash/node4.html
+        //int decrement = (hash / length) % length;
+        if (decrement == 0) {
+            decrement = 1;
+        }
+
+        // stop if we find a free slot, or if we find the key itself.
+        // do skip over removed slots (yes, open addressing is like that...)
+        while (tab[i] != FREE && (tab[i] == REMOVED || !equalsMindTheNull(key, tab[i]))) {
+            i -= decrement;
+            //hashCollisions++;
+            if (i < 0) {
+                i += length;
+            }
+        }
+
+        if (tab[i] == FREE) {
+            return -1;
+        } // not found
+        return i; //found, return index where key is contained
+    }
+
+    /**
+     * @param value the value to be searched in the receiver.
+     * @return the index where the value is contained in the receiver, returns -1 if the value was not found.
+     */
+    protected int indexOfValue(V value) {
+        Object[] val = values;
+
+        // Full linear scan; only occupied slots are compared.
+        for (int i = values.length; --i >= 0;) {
+            if (table[i] != FREE && table[i] != REMOVED && equalsMindTheNull(val[i], value)) {
+                return i;
+            }
+        }
+
+        return -1; // not found
+    }
+
+    /**
+     * Associates the given key with the given value. Replaces any old <tt>(key,someOtherValue)</tt> association, if
+     * existing.
+     *
+     * @param key the key the value shall be associated with.
+     * @param value the value to be associated.
+     * @return the previously associated value, or <tt>null</tt> if the key was
+     *         not present (per the {@link java.util.Map#put} contract).
+     */
+    @SuppressWarnings("unchecked")
+    @Override
+    public V put(K key, V value) {
+        int i = indexOfInsertion(key);
+        if (i < 0) { //already contained
+            i = -i - 1;
+            V previous = (V) this.values[i];
+            this.values[i] = value;
+            return previous;
+        }
+
+        if (this.distinct > this.highWaterMark) {
+            int newCapacity = chooseGrowCapacity(this.distinct + 1, this.minLoadFactor, this.maxLoadFactor);
+            rehash(newCapacity);
+            // Retry: slot indices are invalid after the rehash.
+            return put(key, value);
+        }
+
+        if (this.table[i] == FREE) {
+            this.freeEntries--;
+        }
+        this.table[i] = key;
+        this.values[i] = value;
+        this.distinct++;
+
+        // Open addressing needs at least one FREE slot for probe termination.
+        if (this.freeEntries < 1) { //delta
+            int newCapacity = chooseGrowCapacity(this.distinct + 1, this.minLoadFactor, this.maxLoadFactor);
+            rehash(newCapacity);
+        }
+
+        return null;
+    }
+
+    /**
+     * Rehashes the contents of the receiver into a new table with a smaller or larger capacity. This method is called
+     * automatically when the number of keys in the receiver exceeds the high water mark or falls below the low water
+     * mark.  REMOVED tombstones are dropped in the process.
+     */
+    @SuppressWarnings("unchecked")
+    protected void rehash(int newCapacity) {
+        int oldCapacity = table.length;
+        //if (oldCapacity == newCapacity) return;
+
+        Object[] oldTable = table;
+        Object[] oldValues = values;
+
+        Object[] newTable = new Object[newCapacity];
+        Object[] newValues = new Object[newCapacity];
+
+        this.lowWaterMark = chooseLowWaterMark(newCapacity, this.minLoadFactor);
+        this.highWaterMark = chooseHighWaterMark(newCapacity, this.maxLoadFactor);
+
+        // Install the new (empty) arrays first so indexOfInsertion below
+        // probes against the new table.
+        this.table = newTable;
+        this.values = newValues;
+        this.freeEntries = newCapacity - this.distinct; // delta
+
+        for (int i = oldCapacity; i-- > 0;) {
+            if (oldTable[i] != FREE && oldTable[i] != REMOVED) {
+                Object element = oldTable[i];
+                int index = indexOfInsertion((K)element);
+                newTable[index] = element;
+                newValues[index] = oldValues[i];
+            }
+        }
+    }
+
+    /**
+     * Removes the given key with its associated element from the receiver, if present.
+     *
+     * @param key the key to be removed from the receiver.
+     * @return the previously associated value, or <tt>null</tt> if the key was
+     *         not contained.
+     */
+    @SuppressWarnings("unchecked")
+    @Override
+    public V remove(Object key) {
+        int i = indexOfKey((K)key);
+        if (i < 0) {
+            // key not contained
+            return null;
+        }
+        V removed = (V) values[i];
+
+        // Mark the slot with the tombstone so probe chains stay intact.
+        this.table[i] = REMOVED;
+        this.values[i] = null;
+        this.distinct--;
+
+        if (this.distinct < this.lowWaterMark) {
+            int newCapacity = chooseShrinkCapacity(this.distinct, this.minLoadFactor, this.maxLoadFactor);
+            rehash(newCapacity);
+        }
+
+        return removed;
+    }
+
+    /**
+     * Initializes the receiver.
+     *
+     * @param initialCapacity the initial capacity of the receiver.
+     * @param minLoadFactor the minLoadFactor of the receiver.
+     * @param maxLoadFactor the maxLoadFactor of the receiver.
+     * @throws IllegalArgumentException if <tt>initialCapacity < 0 || (minLoadFactor < 0.0 || minLoadFactor >= 1.0) ||
+     *                                  (maxLoadFactor <= 0.0 || maxLoadFactor >= 1.0) || (minLoadFactor >=
+     *                                  maxLoadFactor)</tt>.
+     */
+    protected void setUp(int initialCapacity, double minLoadFactor, double maxLoadFactor) {
+        if (initialCapacity < 0) {
+            throw new IllegalArgumentException("Initial Capacity must not be less than zero: " + initialCapacity);
+        }
+        if (minLoadFactor < 0.0 || minLoadFactor >= 1.0) {
+            throw new IllegalArgumentException("Illegal minLoadFactor: " + minLoadFactor);
+        }
+        if (maxLoadFactor <= 0.0 || maxLoadFactor >= 1.0) {
+            throw new IllegalArgumentException("Illegal maxLoadFactor: " + maxLoadFactor);
+        }
+        if (minLoadFactor >= maxLoadFactor) {
+            throw new IllegalArgumentException(
+                "Illegal minLoadFactor: " + minLoadFactor + " and maxLoadFactor: " + maxLoadFactor);
+        }
+        int capacity = initialCapacity;
+        capacity = nextPrime(capacity);
+        if (capacity == 0) {
+            capacity = 1;
+        } // open addressing needs at least one FREE slot at any time.
+
+        this.table = new Object[capacity];
+        this.values = new Object[capacity];
+
+        // memory will be exhausted long before this pathological case happens, anyway.
+        this.minLoadFactor = minLoadFactor;
+        if (capacity == largestPrime) {
+            this.maxLoadFactor = 1.0;
+        } else {
+            this.maxLoadFactor = maxLoadFactor;
+        }
+
+        this.distinct = 0;
+        this.freeEntries = capacity; // delta
+
+        // lowWaterMark will be established upon first expansion.
+        // establishing it now (upon instance construction) would immediately make the table shrink upon first put(...).
+        // After all the idea of an "initialCapacity" implies violating lowWaterMarks when an object is young.
+        // See ensureCapacity(...)
+        this.lowWaterMark = 0;
+        this.highWaterMark = chooseHighWaterMark(capacity, this.maxLoadFactor);
+    }
+
+    /**
+     * Trims the capacity of the receiver to be the receiver's current size. Releases any superfluous internal memory. An
+     * application can use this operation to minimize the storage of the receiver.
+     */
+    public void trimToSize() {
+        // * 1.2 because open addressing's performance exponentially degrades beyond that point
+        // so that even rehashing the table can take very long
+        int newCapacity = nextPrime((int) (1 + 1.2 * size()));
+        if (table.length > newCapacity) {
+            rehash(newCapacity);
+        }
+    }
+
+    /**
+     * Rehashes into the smallest prime capacity that fits the current size,
+     * growing or shrinking as needed (unlike trimToSize, which only shrinks).
+     */
+    public void concat() {
+        int newCap = nextPrime(size() + 1); // +1 as we always need a free slot
+        if (newCap != table.length) {
+            rehash(newCap);
+        }
+    }
+
+    /**
+     * Allocate a set to contain Map.Entry objects for the pairs and return it.
+     * The returned view's iterator does not support remove().
+     */
+    @Override
+    public Set<Entry<K,V>> entrySet() {
+        return new EntrySet();
+    }
+
+    /**
+     * Chooses a new prime table capacity optimized for growing that (approximately) satisfies the invariant <tt>c *
+     * minLoadFactor <= size <= c * maxLoadFactor</tt> and has at least one FREE slot for the given size.
+     */
+    protected int chooseGrowCapacity(int size, double minLoad, double maxLoad) {
+        return nextPrime(Math.max(size + 1, (int) ((4 * size / (3 * minLoad + maxLoad)))));
+    }
+
+    /**
+     * Returns new high water mark threshold based on current capacity and maxLoadFactor.
+     *
+     * @return int the new threshold.
+     */
+    protected int chooseHighWaterMark(int capacity, double maxLoad) {
+        return Math.min(capacity - 2, (int) (capacity * maxLoad)); //makes sure there is always at least one FREE slot
+    }
+
+    /**
+     * Returns new low water mark threshold based on current capacity and minLoadFactor.
+     *
+     * @return int the new threshold.
+     */
+    protected int chooseLowWaterMark(int capacity, double minLoad) {
+        return (int) (capacity * minLoad);
+    }
+
+    /**
+     * Chooses a new prime table capacity optimized for shrinking that (approximately) satisfies the invariant <tt>c *
+     * minLoadFactor <= size <= c * maxLoadFactor</tt> and has at least one FREE slot for the given size.
+     */
+    protected int chooseShrinkCapacity(int size, double minLoad, double maxLoad) {
+        return nextPrime(Math.max(size + 1, (int) ((4 * size / (minLoad + 3 * maxLoad)))));
+    }
+
+    /**
+     * Returns a prime number which is <code>&gt;= desiredCapacity</code> and very close to <code>desiredCapacity</code>
+     * (within 11% if <code>desiredCapacity &gt;= 1000</code>).
+     *
+     * @param desiredCapacity the capacity desired by the user.
+     * @return the capacity which should be used for a hashtable.
+     */
+    protected int nextPrime(int desiredCapacity) {
+        int i = java.util.Arrays.binarySearch(primeCapacities, desiredCapacity);
+        if (i < 0) {
+            // desired capacity not found, choose next prime greater than desired capacity
+            i = -i - 1; // remember the semantics of binarySearch...
+        }
+        return primeCapacities[i];
+    }
+
+    /**
+     * Returns the number of (key,value) associations currently contained.
+     *
+     * @return the number of (key,value) associations currently contained.
+     */
+    public int size() {
+        return distinct;
+    }
+
+    /** Null-tolerant equals: two nulls are equal, null never equals non-null. */
+    protected static boolean equalsMindTheNull(Object a, Object b) {
+        if (a == null && b == null) {
+            return true;
+        }
+        if (a == null || b == null) {
+            return false;
+        }
+        return a.equals(b);
+    }
+
+    /** Entry-set view over the occupied slots of the enclosing map. */
+    private class EntrySet extends AbstractSet<Entry<K, V>> {
+        @Override
+        public Iterator<Entry<K, V>> iterator() {
+            return new EntrySetIterator();
+        }
+
+        @Override
+        public int size() {
+            return OpenHashMap.this.size();
+        }
+    }
+
+    /**
+     * Look-ahead iterator over occupied slots; FREE and REMOVED slots are
+     * skipped.  Entries are immutable snapshots; remove() is unsupported.
+     */
+    private class EntrySetIterator implements Iterator<Entry<K, V>> {
+        int idx = -1;
+        Entry<K,V> next;
+
+        EntrySetIterator() {
+            forward();
+        }
+
+        // Advance idx to the next occupied slot and stage its entry, or leave
+        // next == null when the table is exhausted.
+        @SuppressWarnings("unchecked")
+        private void forward() {
+            next = null;
+            while (next == null && ++idx < table.length) {
+                if (table[idx] != FREE && table[idx] != REMOVED) {
+                    next = new SimpleImmutableEntry<K, V>((K) table[idx], (V) values[idx]);
+                }
+            }
+        }
+
+        public boolean hasNext() {
+            return next != null;
+        }
+
+        public Entry<K, V> next() {
+            if (next == null) {
+                throw new NoSuchElementException();
+            }
+            Entry<K,V> n = next;
+            forward();
+            return n;
+        }
+
+        public void remove() {
+            throw new UnsupportedOperationException();
+        }
+
+    }
+
+}
diff --git a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMapList.java b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMapList.java
new file mode 100644
index 000000000..54d7da15f
--- /dev/null
+++ b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMapList.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.felix.resolver.util;
+
+/**
+ * Specialization of {@link OpenHashMap} whose values are
+ * {@link CopyOnWriteList}s, adding a {@link #deepClone()} that also copies
+ * each value list so the clone's lists can diverge independently.
+ */
+public class OpenHashMapList<K, V> extends OpenHashMap<K, CopyOnWriteList<V>> {
+
+    public OpenHashMapList() {
+        super();
+    }
+
+    public OpenHashMapList(int initialCapacity) {
+        super(initialCapacity);
+    }
+
+    public OpenHashMapList(int initialCapacity, double minLoadFactor, double maxLoadFactor) {
+        super(initialCapacity, minLoadFactor, maxLoadFactor);
+    }
+
+    /**
+     * Returns a copy in which every value list has been wrapped in a new
+     * CopyOnWriteList, so mutating a list in the clone does not affect the
+     * receiver.  Keys (and list elements) are still shared.
+     */
+    public OpenHashMapList<K, V> deepClone() {
+        OpenHashMapList<K, V> copy = (OpenHashMapList<K, V>) super.clone();
+        // Walk the raw slot array directly; non-null slots hold the lists.
+        Object[] values = copy.values;
+        for (int i = 0, l = values.length; i < l; i++) {
+            if (values[i] != null) {
+                values[i] = new CopyOnWriteList<V>((CopyOnWriteList<V>) values[i]);
+            }
+        }
+        return copy;
+    }
+}
diff --git a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMapSet.java b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMapSet.java
new file mode 100644
index 000000000..60a1c985a
--- /dev/null
+++ b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/OpenHashMapSet.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.felix.resolver.util;
+
+/**
+ * Specialization of {@link OpenHashMap} whose values are
+ * {@link CopyOnWriteSet}s, adding a {@link #deepClone()} that also copies
+ * each value set so the clone's sets can diverge independently.
+ */
+public class OpenHashMapSet<K, V> extends OpenHashMap<K, CopyOnWriteSet<V>> {
+
+    public OpenHashMapSet() {
+        super();
+    }
+
+    public OpenHashMapSet(int initialCapacity) {
+        super(initialCapacity);
+    }
+
+    public OpenHashMapSet(int initialCapacity, double minLoadFactor, double maxLoadFactor) {
+        super(initialCapacity, minLoadFactor, maxLoadFactor);
+    }
+
+    /**
+     * Returns a copy in which every value set has been wrapped in a new
+     * CopyOnWriteSet, so mutating a set in the clone does not affect the
+     * receiver.  Keys (and set elements) are still shared.
+     */
+    public OpenHashMapSet<K, V> deepClone() {
+        OpenHashMapSet<K, V> copy = (OpenHashMapSet<K, V>) super.clone();
+        // Walk the raw slot array directly; non-null slots hold the sets.
+        Object[] values = copy.values;
+        for (int i = 0, l = values.length; i < l; i++) {
+            if (values[i] != null) {
+                values[i] = new CopyOnWriteSet<V>((CopyOnWriteSet<V>) values[i]);
+            }
+        }
+        return copy;
+    }
+}
diff --git a/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/ShadowList.java b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/ShadowList.java
new file mode 100755
index 000000000..52335fa25
--- /dev/null
+++ b/bundles/org.eclipse.osgi/felix/src/org/apache/felix/resolver/util/ShadowList.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.felix.resolver.util;
+
+import java.util.List;
+
+import org.apache.felix.resolver.util.CopyOnWriteList;
+
+/**
+ * A {@link CopyOnWriteList} that keeps a reference to the list it was
+ * constructed from, so callers can retrieve the unmodified original after
+ * this copy has been mutated.
+ */
+public class ShadowList<T> extends CopyOnWriteList<T>
+{
+    // The source list; held by reference, not defensively copied, so later
+    // external mutations of the original are visible through getOriginal().
+    private final List<T> m_original;
+
+    public ShadowList(List<T> original)
+    {
+        super(original);
+        m_original = original;
+    }
+
+    /** Returns the list this shadow was created from (the same instance). */
+    public List<T> getOriginal()
+    {
+        return m_original;
+    }
+
+} \ No newline at end of file

Back to the top