org.apache.lucene.misc_2.9.1.v20100421-0704

16:41:12.955 INFO  jd.cli.Main - Decompiling org.apache.lucene.misc_2.9.1.v20100421-0704.jar
package org.apache.lucene.index;

import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Date;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.StringHelper;

public class FieldNormModifier
{
  private Directory dir;
  private Similarity sim;
  
  /**
   * Command line entry point: rewrites the norms of one or more fields so
   * they reflect the given Similarity's lengthNorm, or fake (constant)
   * norms when {@code -n} is passed instead of a Similarity class name.
   */
  public static void main(String[] args)
    throws IOException
  {
    if (args.length < 3)
    {
      System.err.println("Usage: FieldNormModifier <index> <package.SimilarityClassName | -n> <field1> [field2] ...");
      System.exit(1);
    }
    Similarity s = null;
    // "-n" means "no Similarity": fake norms are written instead.
    if (!args[1].equals("-n")) {
      try
      {
        Class simClass = Class.forName(args[1]);
        s = (Similarity)simClass.newInstance();
      }
      catch (Exception e)
      {
        System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
        e.printStackTrace(System.err);
        System.exit(1);
      }
    }
    Directory d = FSDirectory.open(new File(args[0]));
    FieldNormModifier fnm = new FieldNormModifier(d, s);
    for (int i = 2; i < args.length; i++)
    {
      System.out.print("Updating field: " + args[i] + " " + new Date().toString() + " ... ");
      fnm.reSetNorms(args[i]);
      System.out.println(new Date().toString());
    }
    d.close();
  }
  
  /**
   * @param d index directory whose norms will be modified
   * @param s Similarity used to recompute norms; null means write fake norms
   */
  public FieldNormModifier(Directory d, Similarity s)
  {
    dir = d;
    sim = s;
  }
  
  /**
   * Recomputes and rewrites the norms of the given field for every
   * non-deleted document.  Two passes over the index: the first counts
   * term occurrences per document, the second reopens the reader and
   * writes the new norm for each document.
   *
   * @param field the field whose norms are reset
   * @throws IOException on index access errors
   */
  public void reSetNorms(String field)
    throws IOException
  {
    String fieldName = StringHelper.intern(field);
    int[] termCounts = new int[0];
    byte[] fakeNorms = new byte[0];
    
    IndexReader reader = null;
    TermEnum termEnum = null;
    TermDocs termDocs = null;
    try
    {
      reader = IndexReader.open(dir);
      termCounts = new int[reader.maxDoc()];
      if (sim == null) {
        fakeNorms = SegmentReader.createFakeNorms(reader.maxDoc());
      }
      try
      {
        termEnum = reader.terms(new Term(field));
        try
        {
          termDocs = reader.termDocs();
          do
          {
            Term term = termEnum.term();
            if ((term != null) && (term.field().equals(fieldName)))
            {
              termDocs.seek(termEnum.term());
              while (termDocs.next()) {
                termCounts[termDocs.doc()] += termDocs.freq();
              }
            }
          } while (termEnum.next());
        }
        finally
        {
          if (null != termDocs) {
            termDocs.close();
          }
        }
      }
      finally
      {
        // FIX: the decompiled code left this finally block empty, so the
        // TermEnum was never closed and leaked.
        if (null != termEnum) {
          termEnum.close();
        }
      }
    }
    finally
    {
      if (null != reader) {
        reader.close();
      }
    }
    try
    {
      // Second pass: write the recomputed (or fake) norms.
      reader = IndexReader.open(dir);
      for (int d = 0; d < termCounts.length; d++) {
        if (!reader.isDeleted(d)) {
          if (sim == null) {
            reader.setNorm(d, fieldName, fakeNorms[0]);
          } else {
            reader.setNorm(d, fieldName, Similarity.encodeNorm(sim.lengthNorm(fieldName, termCounts[d])));
          }
        }
      }
    }
    finally
    {
      if (null != reader) {
        reader.close();
      }
    }
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.FieldNormModifier
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

// Synthetic access class emitted by javac (decompiled as a top-level class)
// used only to grant TermVectorAccessor access to its nested class's private
// constructor; it carries no state or behavior of its own.
class TermVectorAccessor$1 {}

/* Location:
 * Qualified Name:     org.apache.lucene.index.TermVectorAccessor.1
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

class TermVectorAccessor$TermVectorMapperDecorator
  extends TermVectorMapper
{
  private TermVectorMapper decorated;
  
  TermVectorAccessor$TermVectorMapperDecorator(TermVectorAccessor.1 x0)
  {
    this();
  }
  
  public boolean isIgnoringPositions()
  {
    return decorated.isIgnoringPositions();
  }
  
  public boolean isIgnoringOffsets()
  {
    return decorated.isIgnoringOffsets();
  }
  
  private boolean termVectorStored = false;
  
  public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions)
  {
    decorated.setExpectations(field, numTerms, storeOffsets, storePositions);
    termVectorStored = true;
  }
  
  public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
  {
    decorated.map(term, frequency, offsets, positions);
  }
  
  public void setDocumentNumber(int documentNumber)
  {
    decorated.setDocumentNumber(documentNumber);
  }
  
  private TermVectorAccessor$TermVectorMapperDecorator() {}
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.TermVectorAccessor.TermVectorMapperDecorator
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.util.StringHelper;

public class TermVectorAccessor
{
  private TermVectorMapperDecorator decoratedMapper = new TermVectorMapperDecorator(null);
  private List tokens;
  private List positions;
  private List frequencies;
  
  public void accept(IndexReader indexReader, int documentNumber, String fieldName, TermVectorMapper mapper)
    throws IOException
  {
    fieldName = StringHelper.intern(fieldName);
    
    decoratedMapper.decorated = mapper;
    decoratedMapper.termVectorStored = false;
    
    indexReader.getTermFreqVector(documentNumber, fieldName, decoratedMapper);
    if (!decoratedMapper.termVectorStored)
    {
      mapper.setDocumentNumber(documentNumber);
      build(indexReader, fieldName, mapper, documentNumber);
    }
  }
  
  private void build(IndexReader indexReader, String field, TermVectorMapper mapper, int documentNumber)
    throws IOException
  {
    if (tokens == null)
    {
      tokens = new ArrayList(500);
      this.positions = new ArrayList(500);
      frequencies = new ArrayList(500);
    }
    else
    {
      tokens.clear();
      frequencies.clear();
      this.positions.clear();
    }
    TermEnum termEnum = indexReader.terms();
    if (termEnum.skipTo(new Term(field, "")))
    {
      while (termEnum.term().field() == field)
      {
        TermPositions termPositions = indexReader.termPositions(termEnum.term());
        if (termPositions.skipTo(documentNumber))
        {
          frequencies.add(new Integer(termPositions.freq()));
          tokens.add(termEnum.term().text());
          if (!mapper.isIgnoringPositions())
          {
            int[] positions = new int[termPositions.freq()];
            for (int i = 0; i < positions.length; i++) {
              positions[i] = termPositions.nextPosition();
            }
            this.positions.add(positions);
          }
          else
          {
            this.positions.add(null);
          }
        }
        termPositions.close();
        if (!termEnum.next()) {
          break;
        }
      }
      mapper.setDocumentNumber(documentNumber);
      mapper.setExpectations(field, tokens.size(), false, !mapper.isIgnoringPositions());
      for (int i = 0; i < tokens.size(); i++) {
        mapper.map((String)tokens.get(i), ((Integer)frequencies.get(i)).intValue(), (TermVectorOffsetInfo[])null, (int[])this.positions.get(i));
      }
    }
    termEnum.close();
  }
  
  private static class TermVectorMapperDecorator
    extends TermVectorMapper
  {
    private TermVectorMapper decorated;
    
    TermVectorMapperDecorator(TermVectorAccessor.1 x0)
    {
      this();
    }
    
    public boolean isIgnoringPositions()
    {
      return decorated.isIgnoringPositions();
    }
    
    public boolean isIgnoringOffsets()
    {
      return decorated.isIgnoringOffsets();
    }
    
    private boolean termVectorStored = false;
    
    public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions)
    {
      decorated.setExpectations(field, numTerms, storeOffsets, storePositions);
      termVectorStored = true;
    }
    
    public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
    {
      decorated.map(term, frequency, offsets, positions);
    }
    
    public void setDocumentNumber(int documentNumber)
    {
      decorated.setDocumentNumber(documentNumber);
    }
    
    private TermVectorMapperDecorator() {}
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.TermVectorAccessor
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.misc;

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.OpenBitSet;
import org.apache.lucene.util.OpenBitSetDISI;
import org.apache.lucene.util.SortedVIntList;

/**
 * Filter that combines a chain of filters with boolean logic (OR, AND,
 * ANDNOT, XOR), either a single operation for the whole chain or one
 * operation per chained filter.
 */
public class ChainedFilter
  extends Filter
{
  public static final int OR = 0;
  public static final int AND = 1;
  public static final int ANDNOT = 2;
  public static final int XOR = 3;
  /** Logic applied when none was specified at construction time. */
  public static int DEFAULT = OR;
  private Filter[] chain = null;
  // Per-filter logic ops; used only by the array-logic constructor.
  private int[] logicArray;
  // Single logic op for the whole chain; -1 means "not set".
  private int logic = -1;
  
  /** Chains the filters using {@link #DEFAULT} logic. */
  public ChainedFilter(Filter[] chain)
  {
    this.chain = chain;
  }
  
  /**
   * @param logicArray one logic constant per filter in the chain
   */
  public ChainedFilter(Filter[] chain, int[] logicArray)
  {
    this.chain = chain;
    this.logicArray = logicArray;
  }
  
  /**
   * @param logic single logic constant applied between all filters
   */
  public ChainedFilter(Filter[] chain, int logic)
  {
    this.chain = chain;
    this.logicArray = logicArray;
  }
  
  public DocIdSet getDocIdSet(IndexReader reader)
    throws IOException
  {
    // index[0] tracks the next chain element to consume; it is shared with
    // initialResult, which may consume the first element itself.
    int[] index = new int[1];
    index[0] = 0;
    if (logic != -1) {
      return getDocIdSet(reader, logic, index);
    }
    if (logicArray != null) {
      return getDocIdSet(reader, logicArray, index);
    }
    return getDocIdSet(reader, DEFAULT, index);
  }
  
  /** Returns a non-null iterator for the filter's doc id set. */
  private DocIdSetIterator getDISI(Filter filter, IndexReader reader)
    throws IOException
  {
    DocIdSet docIdSet = filter.getDocIdSet(reader);
    if (docIdSet == null) {
      return DocIdSet.EMPTY_DOCIDSET.iterator();
    }
    DocIdSetIterator iter = docIdSet.iterator();
    if (iter == null) {
      return DocIdSet.EMPTY_DOCIDSET.iterator();
    }
    return iter;
  }
  
  /**
   * Seeds the result bit set.  AND starts from the first filter's bits,
   * ANDNOT from their complement (both consume chain[0]); any other logic
   * starts from an empty set.
   */
  private OpenBitSetDISI initialResult(IndexReader reader, int logic, int[] index)
    throws IOException
  {
    // FIX: the decompiled code re-declared 'result' inside each branch,
    // shadowing this variable and leaving it unassigned (compile error).
    OpenBitSetDISI result;
    if (logic == AND)
    {
      result = new OpenBitSetDISI(getDISI(chain[index[0]], reader), reader.maxDoc());
      index[0] += 1;
    }
    else if (logic == ANDNOT)
    {
      result = new OpenBitSetDISI(getDISI(chain[index[0]], reader), reader.maxDoc());
      result.flip(0L, reader.maxDoc());
      index[0] += 1;
    }
    else
    {
      result = new OpenBitSetDISI(reader.maxDoc());
    }
    return result;
  }
  
  /**
   * @deprecated
   */
  protected DocIdSet finalResult(OpenBitSetDISI result, int maxDocs)
  {
    // Sparse results are repacked into the more compact SortedVIntList.
    return result.cardinality() < maxDocs / 9 ? new SortedVIntList(result) : result;
  }
  
  private DocIdSet getDocIdSet(IndexReader reader, int logic, int[] index)
    throws IOException
  {
    OpenBitSetDISI result = initialResult(reader, logic, index);
    for (; index[0] < chain.length; index[0] += 1) {
      doChain(result, logic, chain[index[0]].getDocIdSet(reader));
    }
    return finalResult(result, reader.maxDoc());
  }
  
  private DocIdSet getDocIdSet(IndexReader reader, int[] logic, int[] index)
    throws IOException
  {
    if (logic.length != chain.length) {
      throw new IllegalArgumentException("Invalid number of elements in logic array");
    }
    OpenBitSetDISI result = initialResult(reader, logic[0], index);
    for (; index[0] < chain.length; index[0] += 1) {
      doChain(result, logic[index[0]], chain[index[0]].getDocIdSet(reader));
    }
    return finalResult(result, reader.maxDoc());
  }
  
  public String toString()
  {
    StringBuffer sb = new StringBuffer();
    sb.append("ChainedFilter: [");
    for (int i = 0; i < chain.length; i++)
    {
      sb.append(chain[i]);
      sb.append(' ');
    }
    sb.append(']');
    return sb.toString();
  }
  
  /** Folds one filter's doc id set into the running result with the given logic. */
  private void doChain(OpenBitSetDISI result, int logic, DocIdSet dis)
    throws IOException
  {
    if ((dis instanceof OpenBitSet))
    {
      // Fast path: operate directly on the backing bit set.
      switch (logic)
      {
      case OR: 
        result.or((OpenBitSet)dis);
        break;
      case AND: 
        result.and((OpenBitSet)dis);
        break;
      case ANDNOT: 
        result.andNot((OpenBitSet)dis);
        break;
      case XOR: 
        result.xor((OpenBitSet)dis);
        break;
      default: 
        doChain(result, DEFAULT, dis);
        break;
      }
    }
    else
    {
      // FIX: the decompiled code declared 'disi' twice (compile error).
      DocIdSetIterator disi;
      if (dis == null)
      {
        disi = DocIdSet.EMPTY_DOCIDSET.iterator();
      }
      else
      {
        disi = dis.iterator();
        if (disi == null) {
          disi = DocIdSet.EMPTY_DOCIDSET.iterator();
        }
      }
      switch (logic)
      {
      case OR: 
        result.inPlaceOr(disi);
        break;
      case AND: 
        result.inPlaceAnd(disi);
        break;
      case ANDNOT: 
        result.inPlaceNot(disi);
        break;
      case XOR: 
        result.inPlaceXor(disi);
        break;
      default: 
        doChain(result, DEFAULT, dis);
      }
    }
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.misc.ChainedFilter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.misc;

import java.io.PrintStream;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;

/**
 * Command line tool that prints the 100 highest document-frequency terms
 * of an index, optionally restricted to a single field.
 */
public class HighFreqTerms
{
  /** Number of top terms reported. */
  public static final int numTerms = 100;
  
  public static void main(String[] args)
    throws Exception
  {
    IndexReader reader = null;
    String field = null;
    if (args.length == 1)
    {
      reader = IndexReader.open(args[0]);
    }
    else if (args.length == 2)
    {
      reader = IndexReader.open(args[0]);
      field = args[1];
    }
    else
    {
      usage();
      System.exit(1);
    }
    TermInfoQueue tiq = new TermInfoQueue(numTerms);
    TermEnum terms = reader.terms();
    // FIX: restored the else between the two loops (the decompiled fall-through
    // was behavior-equal only because the first loop exhausts the enum).
    if (field != null) {
      while (terms.next()) {
        if (terms.term().field().equals(field)) {
          tiq.insert(new TermInfo(terms.term(), terms.docFreq()));
        }
      }
    } else {
      while (terms.next()) {
        tiq.insert(new TermInfo(terms.term(), terms.docFreq()));
      }
    }
    while (tiq.size() != 0)
    {
      TermInfo termInfo = (TermInfo)tiq.pop();
      // FIX: the decompiled line referenced undeclared locals 'term' and
      // 'docFreq'; they are fields of the popped TermInfo.
      System.out.println(termInfo.term + " " + termInfo.docFreq);
    }
    reader.close();
  }
  
  private static void usage()
  {
    System.out.println("\n\njava org.apache.lucene.misc.HighFreqTerms <index dir> [field]\n\n");
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.misc.HighFreqTerms
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.misc;

import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

/**
 * Command line tool that merges two or more indexes into a single target
 * index and optimizes the result.
 * Usage: IndexMergeTool &lt;mergedIndex&gt; &lt;index1&gt; &lt;index2&gt; [index3] ...
 */
public class IndexMergeTool
{
  public static void main(String[] args)
    throws IOException
  {
    if (args.length < 3)
    {
      System.err.println("Usage: IndexMergeTool <mergedIndex> <index1> <index2> [index3] ...");
      System.exit(1);
    }
    File target = new File(args[0]);
    
    // The target is created fresh (create=true) with a simple analyzer;
    // the analyzer is irrelevant for merging since no text is re-analyzed.
    IndexWriter writer = new IndexWriter(target, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    
    // Wrap every source path (args[1..]) in a Directory.
    Directory[] sources = new Directory[args.length - 1];
    for (int src = 1; src < args.length; src++) {
      sources[src - 1] = FSDirectory.open(new File(args[src]));
    }
    System.out.println("Merging...");
    writer.addIndexes(sources);
    
    System.out.println("Optimizing...");
    writer.optimize();
    writer.close();
    System.out.println("Done.");
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.misc.IndexMergeTool
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.misc;

import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Date;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.StringHelper;

/**
 * @deprecated
 */
/**
 * Rewrites field norms using a caller-supplied Similarity.
 *
 * @deprecated superseded by FieldNormModifier
 */
public class LengthNormModifier
{
  private Directory dir;
  private Similarity sim;
  
  /**
   * Command line entry point: recomputes the norms of the listed fields
   * using the given Similarity class (must have a no-arg constructor).
   */
  public static void main(String[] args)
    throws IOException
  {
    if (args.length < 3)
    {
      System.err.println("Usage: LengthNormModifier <index> <package.SimilarityClassName> <field1> [field2] ...");
      System.exit(1);
    }
    Similarity s = null;
    try
    {
      Class simClass = Class.forName(args[1]);
      s = (Similarity)simClass.newInstance();
    }
    catch (Exception e)
    {
      System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
      e.printStackTrace(System.err);
    }
    File index = new File(args[0]);
    Directory d = FSDirectory.open(index);
    
    LengthNormModifier lnm = new LengthNormModifier(d, s);
    for (int i = 2; i < args.length; i++)
    {
      System.out.print("Updating field: " + args[i] + " " + new Date().toString() + " ... ");
      lnm.reSetNorms(args[i]);
      System.out.println(new Date().toString());
    }
    d.close();
  }
  
  /**
   * @param d index directory whose norms will be modified
   * @param s Similarity whose lengthNorm supplies the new norm values
   */
  public LengthNormModifier(Directory d, Similarity s)
  {
    dir = d;
    sim = s;
  }
  
  /**
   * Recomputes and rewrites the norms of the given field for every
   * non-deleted document.  Two passes: first count terms per document,
   * then reopen the reader and write the recomputed norms.
   *
   * @throws IOException on index access errors
   */
  public void reSetNorms(String field)
    throws IOException
  {
    String fieldName = StringHelper.intern(field);
    int[] termCounts = new int[0];
    
    IndexReader reader = null;
    TermEnum termEnum = null;
    TermDocs termDocs = null;
    try
    {
      reader = IndexReader.open(dir);
      termCounts = new int[reader.maxDoc()];
      try
      {
        termEnum = reader.terms(new Term(field));
        try
        {
          termDocs = reader.termDocs();
          do
          {
            Term term = termEnum.term();
            if ((term != null) && (term.field().equals(fieldName)))
            {
              termDocs.seek(termEnum.term());
              while (termDocs.next()) {
                termCounts[termDocs.doc()] += termDocs.freq();
              }
            }
          } while (termEnum.next());
        }
        finally
        {
          if (null != termDocs) {
            termDocs.close();
          }
        }
      }
      finally
      {
        // FIX: the decompiled code left this finally block empty, so the
        // TermEnum was never closed and leaked.
        if (null != termEnum) {
          termEnum.close();
        }
      }
    }
    finally
    {
      if (null != reader) {
        reader.close();
      }
    }
    try
    {
      // Second pass: write the recomputed norms.
      reader = IndexReader.open(dir);
      for (int d = 0; d < termCounts.length; d++) {
        if (!reader.isDeleted(d))
        {
          byte norm = Similarity.encodeNorm(sim.lengthNorm(fieldName, termCounts[d]));
          reader.setNorm(d, fieldName, norm);
        }
      }
    }
    finally
    {
      if (null != reader) {
        reader.close();
      }
    }
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.misc.LengthNormModifier
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.misc;

import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.search.DefaultSimilarity;

/**
 * Similarity with a configurable "sweet spot": documents whose field length
 * falls inside [min, max] receive the maximal length norm, with a steepness-
 * controlled falloff outside; tf can use either a baseline or hyperbolic
 * function.  All factors can be overridden per field.
 */
public class SweetSpotSimilarity
  extends DefaultSimilarity
{
  private int ln_min = 1;
  private int ln_max = 1;
  private float ln_steep = 0.5F;
  // Per-field overrides of the length-norm factors.
  private Map ln_mins = new HashMap(7);
  private Map ln_maxs = new HashMap(7);
  private Map ln_steeps = new HashMap(7);
  private Map ln_overlaps = new HashMap(7);
  private float tf_base = 0.0F;
  private float tf_min = 0.0F;
  private float tf_hyper_min = 0.0F;
  private float tf_hyper_max = 2.0F;
  private double tf_hyper_base = 1.3D;
  private float tf_hyper_xoffset = 10.0F;
  
  /** Sets the factors used by {@link #baselineTf(float)}. */
  public void setBaselineTfFactors(float base, float min)
  {
    tf_min = min;
    tf_base = base;
  }
  
  /** Sets the factors used by {@link #hyperbolicTf(float)}. */
  public void setHyperbolicTfFactors(float min, float max, double base, float xoffset)
  {
    tf_hyper_min = min;
    tf_hyper_max = max;
    tf_hyper_base = base;
    tf_hyper_xoffset = xoffset;
  }
  
  /** Sets the default length-norm sweet-spot factors for all fields. */
  public void setLengthNormFactors(int min, int max, float steepness)
  {
    ln_min = min;
    ln_max = max;
    ln_steep = steepness;
  }
  
  /** Sets length-norm sweet-spot factors for one specific field. */
  public void setLengthNormFactors(String field, int min, int max, float steepness, boolean discountOverlaps)
  {
    ln_mins.put(field, new Integer(min));
    ln_maxs.put(field, new Integer(max));
    ln_steeps.put(field, new Float(steepness));
    ln_overlaps.put(field, new Boolean(discountOverlaps));
  }
  
  public float computeNorm(String fieldName, FieldInvertState state)
  {
    boolean overlaps = discountOverlaps;
    if (ln_overlaps.containsKey(fieldName)) {
      overlaps = ((Boolean)ln_overlaps.get(fieldName)).booleanValue();
    }
    // FIX: the decompiled code declared numTokens twice (compile error).
    int numTokens;
    if (overlaps) {
      numTokens = state.getLength() - state.getNumOverlap();
    } else {
      numTokens = state.getLength();
    }
    return state.getBoost() * lengthNorm(fieldName, numTokens);
  }
  
  /**
   * Sweet-spot length norm: 1/sqrt(steepness*(|x-min|+|x-max|-(max-min))+1),
   * which is maximal for min &lt;= numTerms &lt;= max.
   */
  public float lengthNorm(String fieldName, int numTerms)
  {
    int l = ln_min;
    int h = ln_max;
    float s = ln_steep;
    if (ln_mins.containsKey(fieldName)) {
      l = ((Number)ln_mins.get(fieldName)).intValue();
    }
    if (ln_maxs.containsKey(fieldName)) {
      h = ((Number)ln_maxs.get(fieldName)).intValue();
    }
    if (ln_steeps.containsKey(fieldName)) {
      s = ((Number)ln_steeps.get(fieldName)).floatValue();
    }
    return (float)(1.0D / Math.sqrt(s * (Math.abs(numTerms - l) + Math.abs(numTerms - h) - (h - l)) + 1.0F));
  }
  
  /** Delegates to {@link #baselineTf(float)}. */
  public float tf(int freq)
  {
    return baselineTf(freq);
  }
  
  /** Baseline tf: 0 for 0, a flat base up to tf_min, then sqrt growth. */
  public float baselineTf(float freq)
  {
    if (0.0F == freq) {
      return 0.0F;
    }
    return freq <= tf_min ? tf_base : (float)Math.sqrt(freq + tf_base * tf_base - tf_min);
  }
  
  /** Hyperbolic tf: smooth tanh-like transition from min to max around xoffset. */
  public float hyperbolicTf(float freq)
  {
    if (0.0F == freq) {
      return 0.0F;
    }
    float min = tf_hyper_min;
    float max = tf_hyper_max;
    double base = tf_hyper_base;
    float xoffset = tf_hyper_xoffset;
    double x = freq - xoffset;
    
    float result = min + (float)((max - min) / 2.0F * ((Math.pow(base, x) - Math.pow(base, -x)) / (Math.pow(base, x) + Math.pow(base, -x)) + 1.0D));
    
    // Very large |x| can overflow pow to Inf/Inf = NaN; clamp to max.
    return Float.isNaN(result) ? max : result;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.misc.SweetSpotSimilarity
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.misc;

import org.apache.lucene.index.Term;

/** Simple value holder pairing a term with its document frequency. */
final class TermInfo
{
  int docFreq;
  Term term;
  
  TermInfo(Term t, int df)
  {
    this.term = t;
    this.docFreq = df;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.misc.TermInfo
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.misc;

import org.apache.lucene.util.PriorityQueue;

/** Priority queue ordering TermInfo entries by ascending document frequency. */
final class TermInfoQueue
  extends PriorityQueue
{
  TermInfoQueue(int size)
  {
    initialize(size);
  }
  
  protected final boolean lessThan(Object a, Object b)
  {
    TermInfo termInfoA = (TermInfo)a;
    TermInfo termInfoB = (TermInfo)b;
    // FIX: the decompiled code compared "docFreq < docFreq" (always false,
    // and neither name resolves here); compare the two entries' frequencies.
    return termInfoA.docFreq < termInfoB.docFreq;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.misc.TermInfoQueue
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.queryParser.analyzing;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

/**
 * QueryParser subclass that runs the configured Analyzer over the term text
 * of wildcard, prefix, fuzzy and range queries (the stock QueryParser leaves
 * those unanalyzed).  Wildcard characters themselves are preserved.
 */
public class AnalyzingQueryParser
  extends QueryParser
{
  /**
   * @deprecated use the Version-taking constructor instead
   */
  public AnalyzingQueryParser(String field, Analyzer analyzer)
  {
    super(field, analyzer);
  }
  
  public AnalyzingQueryParser(Version matchVersion, String field, Analyzer analyzer)
  {
    super(matchVersion, field, analyzer);
  }
  
  /**
   * Splits termStr into token parts (tlist) and wildcard parts (wlist),
   * analyzes the token parts, then re-interleaves them.  Fails if the
   * analyzer adds or removes tokens.
   *
   * @throws ParseException if the analyzer changed the token count
   */
  protected Query getWildcardQuery(String field, String termStr)
    throws ParseException
  {
    List tlist = new ArrayList();
    List wlist = new ArrayList();
    
    // True while scanning ordinary characters; false inside a wildcard run.
    boolean isWithinToken = (!termStr.startsWith("?")) && (!termStr.startsWith("*"));
    StringBuffer tmpBuffer = new StringBuffer();
    char[] chars = termStr.toCharArray();
    for (int i = 0; i < termStr.length(); i++)
    {
      if ((chars[i] == '?') || (chars[i] == '*'))
      {
        // Transition token -> wildcard: flush the accumulated token chars.
        if (isWithinToken)
        {
          tlist.add(tmpBuffer.toString());
          tmpBuffer.setLength(0);
        }
        isWithinToken = false;
      }
      else
      {
        // Transition wildcard -> token: flush the accumulated wildcards.
        if (!isWithinToken)
        {
          wlist.add(tmpBuffer.toString());
          tmpBuffer.setLength(0);
        }
        isWithinToken = true;
      }
      tmpBuffer.append(chars[i]);
    }
    // Flush whatever the string ended with.
    if (isWithinToken) {
      tlist.add(tmpBuffer.toString());
    } else {
      wlist.add(tmpBuffer.toString());
    }
    TokenStream source = getAnalyzer().tokenStream(field, new StringReader(termStr));
    TermAttribute termAtt = (TermAttribute)source.addAttribute(TermAttribute.class);
    
    // Overwrite each token part with its analyzed form, in order.
    int countTokens = 0;
    for (;;)
    {
      try
      {
        if (!source.incrementToken()) {
          break;
        }
      }
      catch (IOException e)
      {
        break;
      }
      String term = termAtt.term();
      if (!"".equals(term)) {
        try
        {
          tlist.set(countTokens++, term);
        }
        catch (IndexOutOfBoundsException ioobe)
        {
          // Analyzer produced more tokens than there were token parts.
          countTokens = -1;
        }
      }
    }
    try
    {
      source.close();
    }
    catch (IOException e) {}
    if (countTokens != tlist.size()) {
      throw new ParseException("Cannot build WildcardQuery with analyzer " + getAnalyzer().getClass() + " - tokens added or lost");
    }
    if (tlist.size() == 0) {
      return null;
    }
    if (tlist.size() == 1)
    {
      if ((wlist != null) && (wlist.size() == 1)) {
        // Single token followed by a single wildcard run (e.g. "foo*").
        return super.getWildcardQuery(field, (String)tlist.get(0) + ((String)wlist.get(0)).toString());
      }
      throw new IllegalArgumentException("getWildcardQuery called without wildcard");
    }
    // Multiple parts: interleave analyzed tokens with their wildcard runs.
    StringBuffer sb = new StringBuffer();
    for (int i = 0; i < tlist.size(); i++)
    {
      sb.append((String)tlist.get(i));
      if ((wlist != null) && (wlist.size() > i)) {
        sb.append((String)wlist.get(i));
      }
    }
    return super.getWildcardQuery(field, sb.toString());
  }
  
  /**
   * Analyzes termStr and builds a prefix query from the single resulting
   * token; fails if the analyzer yields zero or multiple tokens.
   *
   * @throws ParseException if the analyzer did not yield exactly one token
   */
  protected Query getPrefixQuery(String field, String termStr)
    throws ParseException
  {
    TokenStream source = getAnalyzer().tokenStream(field, new StringReader(termStr));
    List tlist = new ArrayList();
    TermAttribute termAtt = (TermAttribute)source.addAttribute(TermAttribute.class);
    for (;;)
    {
      try
      {
        if (!source.incrementToken()) {
          break;
        }
      }
      catch (IOException e)
      {
        break;
      }
      tlist.add(termAtt.term());
    }
    try
    {
      source.close();
    }
    catch (IOException e) {}
    if (tlist.size() == 1) {
      return super.getPrefixQuery(field, (String)tlist.get(0));
    }
    throw new ParseException("Cannot build PrefixQuery with analyzer " + getAnalyzer().getClass() + (tlist.size() > 1 ? " - token(s) added" : " - token consumed"));
  }
  
  /**
   * Analyzes termStr and builds a fuzzy query from the single resulting
   * token; fails if the analyzer yields multiple tokens, returns null if
   * it yields none.
   *
   * @throws ParseException if the analyzer yielded multiple tokens
   */
  protected Query getFuzzyQuery(String field, String termStr, float minSimilarity)
    throws ParseException
  {
    TokenStream source = getAnalyzer().tokenStream(field, new StringReader(termStr));
    TermAttribute termAtt = (TermAttribute)source.addAttribute(TermAttribute.class);
    String nextToken = null;
    boolean multipleTokens = false;
    try
    {
      if (source.incrementToken()) {
        nextToken = termAtt.term();
      }
      multipleTokens = source.incrementToken();
    }
    catch (IOException e)
    {
      nextToken = null;
    }
    try
    {
      source.close();
    }
    catch (IOException e) {}
    if (multipleTokens) {
      throw new ParseException("Cannot build FuzzyQuery with analyzer " + getAnalyzer().getClass() + " - tokens were added");
    }
    return nextToken == null ? null : super.getFuzzyQuery(field, nextToken, minSimilarity);
  }
  
  /**
   * Analyzes both range endpoints independently and builds the range query
   * from the analyzed forms; fails if either endpoint analyzes to more than
   * one token.
   *
   * @throws ParseException if either endpoint yielded multiple tokens
   */
  protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive)
    throws ParseException
  {
    TokenStream source = getAnalyzer().tokenStream(field, new StringReader(part1));
    TermAttribute termAtt = (TermAttribute)source.addAttribute(TermAttribute.class);
    boolean multipleTokens = false;
    try
    {
      if (source.incrementToken()) {
        part1 = termAtt.term();
      }
      multipleTokens = source.incrementToken();
    }
    catch (IOException e) {}
    try
    {
      source.close();
    }
    catch (IOException e) {}
    if (multipleTokens) {
      throw new ParseException("Cannot build RangeQuery with analyzer " + getAnalyzer().getClass() + " - tokens were added to part1");
    }
    // Second endpoint: same treatment with a fresh token stream.
    source = getAnalyzer().tokenStream(field, new StringReader(part2));
    termAtt = (TermAttribute)source.addAttribute(TermAttribute.class);
    try
    {
      if (source.incrementToken()) {
        part2 = termAtt.term();
      }
      multipleTokens = source.incrementToken();
    }
    catch (IOException e) {}
    try
    {
      source.close();
    }
    catch (IOException e) {}
    if (multipleTokens) {
      throw new ParseException("Cannot build RangeQuery with analyzer " + getAnalyzer().getClass() + " - tokens were added to part2");
    }
    return super.getRangeQuery(field, part1, part2, inclusive);
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.queryParser.analyzing.AnalyzingQueryParser
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.queryParser.complexPhrase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanNotQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;

/**
 * A placeholder query produced during pass 1 of ComplexPhraseQueryParser.
 * It records the raw phrase text; {@link #parsePhraseElements(QueryParser)}
 * parses that text in pass 2, and {@link #rewrite(IndexReader)} converts the
 * parsed contents into an equivalent span query so positional (proximity)
 * constraints are enforced.
 */
class ComplexPhraseQueryParser$ComplexPhraseQuery
  extends Query
{
  String field;
  String phrasedQueryStringContents;
  int slopFactor;
  // Result of pass-2 parsing; null until parsePhraseElements has run.
  private Query contents;
  
  public ComplexPhraseQueryParser$ComplexPhraseQuery(String field, String phrasedQueryStringContents, int slopFactor)
  {
    this.field = field;
    this.phrasedQueryStringContents = phrasedQueryStringContents;
    this.slopFactor = slopFactor;
  }
  
  /**
   * Parses the stored phrase contents with the given (pass-2 configured)
   * parser and caches the result for {@link #rewrite(IndexReader)}.
   *
   * @throws ParseException if the phrase contents cannot be parsed
   */
  protected void parsePhraseElements(QueryParser qp)
    throws ParseException
  {
    contents = qp.parse(phrasedQueryStringContents);
  }
  
  /**
   * Rewrites the parsed phrase contents into span queries.
   * A single-term phrase passes through unchanged; a BooleanQuery is
   * converted clause-by-clause into a SpanNearQuery, with MUST_NOT clauses
   * handled via a SpanNotQuery (include-near minus exclude-near).
   *
   * @throws IllegalArgumentException if the parsed contents contain a query
   *         type that cannot be expressed as spans
   */
  public Query rewrite(IndexReader reader)
    throws IOException
  {
    if ((contents instanceof TermQuery)) {
      return contents;
    }
    int numNegatives = 0;
    if (!(contents instanceof BooleanQuery)) {
      throw new IllegalArgumentException("Unknown query type \"" + contents.getClass().getName() + "\" found in phrase query string \"" + phrasedQueryStringContents + "\"");
    }
    BooleanQuery bq = (BooleanQuery)contents;
    BooleanClause[] bclauses = bq.getClauses();
    SpanQuery[] allSpanClauses = new SpanQuery[bclauses.length];
    for (int i = 0; i < bclauses.length; i++)
    {
      Query qc = bclauses[i].getQuery();
      
      // Rewrite first so multi-term queries expand into boolean form.
      qc = qc.rewrite(reader);
      if (bclauses[i].getOccur().equals(BooleanClause.Occur.MUST_NOT)) {
        numNegatives++;
      }
      if ((qc instanceof BooleanQuery))
      {
        ArrayList sc = new ArrayList();
        addComplexPhraseClause(sc, (BooleanQuery)qc);
        if (sc.size() > 0) {
          allSpanClauses[i] = ((SpanQuery)sc.get(0));
        } else {
          // No terms matched at this position; substitute a clause that can
          // never match so the overall phrase correctly matches nothing.
          allSpanClauses[i] = new SpanTermQuery(new Term(field, "Dummy clause because no terms found - must match nothing"));
        }
      }
      else if ((qc instanceof TermQuery))
      {
        TermQuery tq = (TermQuery)qc;
        allSpanClauses[i] = new SpanTermQuery(tq.getTerm());
      }
      else
      {
        throw new IllegalArgumentException("Unknown query type \"" + qc.getClass().getName() + "\" found in phrase query string \"" + phrasedQueryStringContents + "\"");
      }
    }
    if (numNegatives == 0) {
      // Pure-positive phrase: an in-order near query suffices.
      return new SpanNearQuery(allSpanClauses, slopFactor, true);
    }
    // Mixed positive/negative: match the positive terms near each other,
    // then subtract spans where the full (negated-term-including) sequence
    // occurs.
    ArrayList positiveClauses = new ArrayList();
    for (int j = 0; j < allSpanClauses.length; j++) {
      if (!bclauses[j].getOccur().equals(BooleanClause.Occur.MUST_NOT)) {
        positiveClauses.add(allSpanClauses[j]);
      }
    }
    SpanQuery[] includeClauses = (SpanQuery[])positiveClauses.toArray(new SpanQuery[positiveClauses.size()]);
    
    SpanQuery include = null;
    if (includeClauses.length == 1) {
      include = includeClauses[0];
    } else {
      // Widen the slop by the number of removed (negative) positions.
      include = new SpanNearQuery(includeClauses, slopFactor + numNegatives, true);
    }
    SpanNearQuery exclude = new SpanNearQuery(allSpanClauses, slopFactor, true);
    
    SpanNotQuery snot = new SpanNotQuery(include, exclude);
    return snot;
  }
  
  /**
   * Recursively flattens a BooleanQuery produced by multi-term expansion
   * into a single span clause: OR'd terms become a SpanOrQuery, and any
   * MUST_NOT terms are subtracted with a SpanNotQuery.
   */
  private void addComplexPhraseClause(List spanClauses, BooleanQuery qc)
  {
    ArrayList ors = new ArrayList();
    ArrayList nots = new ArrayList();
    BooleanClause[] bclauses = qc.getClauses();
    for (int i = 0; i < bclauses.length; i++)
    {
      Query childQuery = bclauses[i].getQuery();
      
      ArrayList chosenList = ors;
      if (bclauses[i].getOccur() == BooleanClause.Occur.MUST_NOT) {
        chosenList = nots;
      }
      if ((childQuery instanceof TermQuery))
      {
        TermQuery tq = (TermQuery)childQuery;
        SpanTermQuery stq = new SpanTermQuery(tq.getTerm());
        stq.setBoost(tq.getBoost());
        chosenList.add(stq);
      }
      else if ((childQuery instanceof BooleanQuery))
      {
        BooleanQuery cbq = (BooleanQuery)childQuery;
        addComplexPhraseClause(chosenList, cbq);
      }
      else
      {
        throw new IllegalArgumentException("Unknown query type:" + childQuery.getClass().getName());
      }
    }
    if (ors.size() == 0) {
      return;
    }
    SpanOrQuery soq = new SpanOrQuery((SpanQuery[])ors.toArray(new SpanQuery[ors.size()]));
    if (nots.size() == 0)
    {
      spanClauses.add(soq);
    }
    else
    {
      SpanOrQuery snqs = new SpanOrQuery((SpanQuery[])nots.toArray(new SpanQuery[nots.size()]));
      
      SpanNotQuery snq = new SpanNotQuery(soq, snqs);
      spanClauses.add(snq);
    }
  }
  
  public String toString(String field)
  {
    return "\"" + phrasedQueryStringContents + "\"";
  }
  
  public int hashCode()
  {
    int prime = 31;
    int result = 1;
    result = 31 * result + (field == null ? 0 : field.hashCode());
    result = 31 * result + (phrasedQueryStringContents == null ? 0 : phrasedQueryStringContents.hashCode());
    
    result = 31 * result + slopFactor;
    return result;
  }
  
  /**
   * Equality on field, phrase contents and slop.
   * FIX: the decompiled original compared every field against itself
   * (e.g. {@code field.equals(field)}, {@code slopFactor != slopFactor}),
   * which made equals() return true for any two instances of this class.
   * All comparisons now reference {@code other}'s fields, restoring the
   * equals/hashCode contract.
   */
  public boolean equals(Object obj)
  {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    ComplexPhraseQueryParser$ComplexPhraseQuery other = (ComplexPhraseQueryParser$ComplexPhraseQuery)obj;
    if (field == null)
    {
      if (other.field != null) {
        return false;
      }
    }
    else if (!field.equals(other.field)) {
      return false;
    }
    if (phrasedQueryStringContents == null)
    {
      if (other.phrasedQueryStringContents != null) {
        return false;
      }
    }
    else if (!phrasedQueryStringContents.equals(other.phrasedQueryStringContents)) {
      return false;
    }
    if (slopFactor != other.slopFactor) {
      return false;
    }
    return true;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.queryParser.complexPhrase.ComplexPhraseQueryParser.ComplexPhraseQuery
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.queryParser.complexPhrase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.MultiTermQuery.RewriteMethod;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanNotQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.Version;

public class ComplexPhraseQueryParser
  extends QueryParser
{
  // Phrase queries collected during pass 1; re-parsed during pass 2.
  // null until parse() has been invoked at least once.
  private ArrayList complexPhrases = null;
  // True while parse() is resolving the contents of collected phrases.
  private boolean isPass2ResolvingPhrases;
  // Phrase currently being resolved in pass 2; consulted by newTermQuery
  // to verify nested clauses target the phrase's own field.
  private ComplexPhraseQuery currentPhraseQuery = null;
  
  /**
   * Creates a parser pinned to {@link Version#LUCENE_24} compatibility.
   *
   * @deprecated Use {@link #ComplexPhraseQueryParser(Version, String, Analyzer)}
   *             with an explicit match version instead.
   */
  public ComplexPhraseQueryParser(String f, Analyzer a)
  {
    this(Version.LUCENE_24, f, a);
  }
  
  /**
   * Creates a parser for the given default field and analyzer, honoring the
   * supplied compatibility version.
   */
  public ComplexPhraseQueryParser(Version matchVersion, String f, Analyzer a)
  {
    super(matchVersion, f, a);
  }
  
  /**
   * Pass-1 hook: instead of building a standard phrase query, records the
   * raw phrase text as a ComplexPhraseQuery placeholder whose contents are
   * parsed later in pass 2 (see {@link #parse(String)}).
   */
  protected Query getFieldQuery(String field, String queryText, int slop)
  {
    ComplexPhraseQuery phraseQuery = new ComplexPhraseQuery(field, queryText, slop);
    complexPhrases.add(phraseQuery);
    return phraseQuery;
  }
  
  /**
   * Two-pass parse. Pass 1 parses the whole query, collecting embedded
   * phrases as ComplexPhraseQuery placeholders; pass 2 then parses each
   * phrase's contents with multi-term queries forced to scoring-boolean
   * rewrite (so they can later be converted to span queries).
   *
   * FIX: the decompiled original used the loop variable {@code iterator}
   * before declaring it (the declaration had been hoisted into the finally
   * block by the decompiler), which does not compile; the iterator is now
   * declared in the for-loop header.
   *
   * @throws ParseException if the query or any phrase contents are invalid
   */
  public Query parse(String query)
    throws ParseException
  {
    if (isPass2ResolvingPhrases)
    {
      // Re-entrant call made while resolving a phrase: temporarily switch
      // the rewrite method, restoring the caller's setting afterwards.
      MultiTermQuery.RewriteMethod oldMethod = getMultiTermRewriteMethod();
      try
      {
        setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
        return super.parse(query);
      }
      finally
      {
        setMultiTermRewriteMethod(oldMethod);
      }
    }
    // Pass 1: collect phrases via getFieldQuery.
    complexPhrases = new ArrayList();
    Query q = super.parse(query);
    
    // Pass 2: parse the contents of each collected phrase.
    isPass2ResolvingPhrases = true;
    try
    {
      for (Iterator iterator = complexPhrases.iterator(); iterator.hasNext();)
      {
        currentPhraseQuery = ((ComplexPhraseQuery)iterator.next());
        currentPhraseQuery.parsePhraseElements(this);
      }
    }
    finally
    {
      isPass2ResolvingPhrases = false;
    }
    return q;
  }
  
  /**
   * During pass 2, validates that every term generated inside a phrase
   * targets the phrase's own field before delegating to the superclass.
   * A cross-field clause surfaces as a RuntimeException wrapping the
   * underlying ParseException (this hook cannot throw checked exceptions).
   */
  protected Query newTermQuery(Term term)
  {
    if (!isPass2ResolvingPhrases) {
      return super.newTermQuery(term);
    }
    try
    {
      checkPhraseClauseIsForSameField(term.field());
    }
    catch (ParseException pe)
    {
      throw new RuntimeException("Error parsing complex phrase", pe);
    }
    return super.newTermQuery(term);
  }
  
  private void checkPhraseClauseIsForSameField(String field)
    throws ParseException
  {
    if (!field.equals(currentPhraseQuery.field)) {
      throw new ParseException("Cannot have clause for field \"" + field + "\" nested in phrase " + " for field \"" + currentPhraseQuery.field + "\
1 2 3 4

Further reading...

For more information on Java 1.5 Tiger, you may find Java 1.5 Tiger, A developer's Notebook by D. Flanagan and B. McLaughlin from O'Reilly of interest.

New!JAR listings


Copyright 2006-2017. Infinite Loop Ltd