org.apache.lucene.highlighter_2.9.1.v20100421-0704

16:41:11.830 INFO  jd.cli.Main - Decompiling org.apache.lucene.highlighter_2.9.1.v20100421-0704.jar
package org.apache.lucene.search.highlight;

public class DefaultEncoder
  implements Encoder
{
  public String encodeText(String originalText)
  {
    return originalText;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.DefaultEncoder
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

public abstract interface Encoder
{
  public abstract String encodeText(String paramString);
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.Encoder
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

public abstract interface Formatter
{
  public abstract String highlightTerm(String paramString, TokenGroup paramTokenGroup);
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.Formatter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
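
A minimal sketch of a custom Formatter (not part of the decompiled jar): the Highlighter hands every token run to highlightTerm(), and TokenGroup.getTotalScore() tells the formatter whether that run actually matched the query. The class name below is hypothetical.

import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.TokenGroup;

// Hypothetical example: wrap matched terms in asterisks instead of HTML tags.
public class AsteriskFormatter implements Formatter
{
  public String highlightTerm(String originalText, TokenGroup tokenGroup)
  {
    if (tokenGroup.getTotalScore() <= 0.0F) {
      return originalText;   // this token run did not contribute to the query score
    }
    return "*" + originalText + "*";
  }
}
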
package org.apache.lucene.search.highlight;

import org.apache.lucene.util.PriorityQueue;

class FragmentQueue
  extends PriorityQueue
{
  public FragmentQueue(int size)
  {
    initialize(size);
  }
  
  public final boolean lessThan(Object a, Object b)
  {
    TextFragment fragA = (TextFragment)a;
    TextFragment fragB = (TextFragment)b;
    if (fragA.getScore() == fragB.getScore()) {
      return fragA.fragNum > fragB.fragNum;
    }
    return fragA.getScore() < fragB.getScore();
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.FragmentQueue
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

import org.apache.lucene.analysis.TokenStream;

public abstract interface Fragmenter
{
  public abstract void start(String paramString, TokenStream paramTokenStream);
  
  public abstract boolean isNewFragment();
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.Fragmenter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
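
A sketch of a custom Fragmenter (hypothetical, not in the jar): the Highlighter calls start() once per document and isNewFragment() roughly once per token, so a fragmenter can split the text simply by counting tokens.

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.search.highlight.Fragmenter;

// Hypothetical example: start a new fragment every 'tokensPerFragment' tokens.
public class TokenCountFragmenter implements Fragmenter
{
  private final int tokensPerFragment;
  private int tokenCount;

  public TokenCountFragmenter(int tokensPerFragment)
  {
    this.tokensPerFragment = tokensPerFragment;
  }

  public void start(String originalText, TokenStream tokenStream)
  {
    tokenCount = 0;
  }

  public boolean isNewFragment()
  {
    tokenCount++;
    return tokenCount % tokensPerFragment == 0;
  }
}
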
package org.apache.lucene.search.highlight;

public class GradientFormatter
  implements Formatter
{
  private float maxScore;
  int fgRMin;
  int fgGMin;
  int fgBMin;
  int fgRMax;
  int fgGMax;
  int fgBMax;
  protected boolean highlightForeground;
  int bgRMin;
  int bgGMin;
  int bgBMin;
  int bgRMax;
  int bgGMax;
  int bgBMax;
  protected boolean highlightBackground;
  
  public GradientFormatter(float maxScore, String minForegroundColor, String maxForegroundColor, String minBackgroundColor, String maxBackgroundColor)
  {
    highlightForeground = ((minForegroundColor != null) && (maxForegroundColor != null));
    if (highlightForeground)
    {
      if (minForegroundColor.length() != 7) {
        throw new IllegalArgumentException("minForegroundColor is not 7 bytes long eg a hex RGB value such as #FFFFFF");
      }
      if (maxForegroundColor.length() != 7) {
        throw new IllegalArgumentException("minForegroundColor is not 7 bytes long eg a hex RGB value such as #FFFFFF");
      }
      fgRMin = hexToInt(minForegroundColor.substring(1, 3));
      fgGMin = hexToInt(minForegroundColor.substring(3, 5));
      fgBMin = hexToInt(minForegroundColor.substring(5, 7));
      
      fgRMax = hexToInt(maxForegroundColor.substring(1, 3));
      fgGMax = hexToInt(maxForegroundColor.substring(3, 5));
      fgBMax = hexToInt(maxForegroundColor.substring(5, 7));
    }
    highlightBackground = ((minBackgroundColor != null) && (maxBackgroundColor != null));
    if (highlightBackground)
    {
      if (minBackgroundColor.length() != 7) {
        throw new IllegalArgumentException("minBackgroundColor is not 7 bytes long eg a hex RGB value such as #FFFFFF");
      }
      if (maxBackgroundColor.length() != 7) {
        throw new IllegalArgumentException("minBackgroundColor is not 7 bytes long eg a hex RGB value such as #FFFFFF");
      }
      bgRMin = hexToInt(minBackgroundColor.substring(1, 3));
      bgGMin = hexToInt(minBackgroundColor.substring(3, 5));
      bgBMin = hexToInt(minBackgroundColor.substring(5, 7));
      
      bgRMax = hexToInt(maxBackgroundColor.substring(1, 3));
      bgGMax = hexToInt(maxBackgroundColor.substring(3, 5));
      bgBMax = hexToInt(maxBackgroundColor.substring(5, 7));
    }
    this.maxScore = maxScore;
  }
  
  public String highlightTerm(String originalText, TokenGroup tokenGroup)
  {
    if (tokenGroup.getTotalScore() == 0.0F) {
      return originalText;
    }
    float score = tokenGroup.getTotalScore();
    if (score == 0.0F) {
      return originalText;
    }
    StringBuffer sb = new StringBuffer();
    sb.append("<font ");
    if (highlightForeground)
    {
      sb.append("color=\"");
      sb.append(getForegroundColorString(score));
      sb.append("\" ");
    }
    if (highlightBackground)
    {
      sb.append("bgcolor=\"");
      sb.append(getBackgroundColorString(score));
      sb.append("\" ");
    }
    sb.append(">");
    sb.append(originalText);
    sb.append("</font>");
    return sb.toString();
  }
  
  protected String getForegroundColorString(float score)
  {
    int rVal = getColorVal(fgRMin, fgRMax, score);
    int gVal = getColorVal(fgGMin, fgGMax, score);
    int bVal = getColorVal(fgBMin, fgBMax, score);
    StringBuffer sb = new StringBuffer();
    sb.append("#");
    sb.append(intToHex(rVal));
    sb.append(intToHex(gVal));
    sb.append(intToHex(bVal));
    return sb.toString();
  }
  
  protected String getBackgroundColorString(float score)
  {
    int rVal = getColorVal(bgRMin, bgRMax, score);
    int gVal = getColorVal(bgGMin, bgGMax, score);
    int bVal = getColorVal(bgBMin, bgBMax, score);
    StringBuffer sb = new StringBuffer();
    sb.append("#");
    sb.append(intToHex(rVal));
    sb.append(intToHex(gVal));
    sb.append(intToHex(bVal));
    return sb.toString();
  }
  
  private int getColorVal(int colorMin, int colorMax, float score)
  {
    if (colorMin == colorMax) {
      return colorMin;
    }
    float scale = Math.abs(colorMin - colorMax);
    float relScorePercent = Math.min(maxScore, score) / maxScore;
    float colScore = scale * relScorePercent;
    return Math.min(colorMin, colorMax) + (int)colScore;
  }
  
  private static char[] hexDigits = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
  
  private static String intToHex(int i)
  {
    return "" + hexDigits[((i & 0xF0) >> 4)] + hexDigits[(i & 0xF)];
  }
  
  public static final int hexToInt(String hex)
  {
    int len = hex.length();
    if (len > 16) {
      throw new NumberFormatException();
    }
    int l = 0;
    for (int i = 0; i < len; i++)
    {
      l <<= 4;
      int c = Character.digit(hex.charAt(i), 16);
      if (c < 0) {
        throw new NumberFormatException();
      }
      l |= c;
    }
    return l;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.GradientFormatter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
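
A usage sketch (not part of the jar): GradientFormatter interpolates each highlighted term's colour between the min and max values according to its score relative to maxScore; passing null for a colour pair disables that gradient. The 'scorer' variable below is a placeholder for any Scorer from this package.

// Background fades from pale yellow to red as token scores approach maxScore (1.0 here);
// foreground colours are left untouched because both foreground arguments are null.
Formatter formatter = new GradientFormatter(1.0F, null, null, "#FFFFCC", "#FF0000");
Highlighter highlighter = new Highlighter(formatter, new SimpleHTMLEncoder(), scorer);
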
package org.apache.lucene.search.highlight;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Iterator;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

public class Highlighter
{
  public static final int DEFAULT_MAX_CHARS_TO_ANALYZE = 51200;
  /**
   * @deprecated
   */
  public static final int DEFAULT_MAX_DOC_BYTES_TO_ANALYZE = 51200;
  private int maxDocCharsToAnalyze = 51200;
  private Formatter formatter;
  private Encoder encoder;
  private Fragmenter textFragmenter = new SimpleFragmenter();
  private Scorer fragmentScorer = null;
  
  public Highlighter(Scorer fragmentScorer)
  {
    this(new SimpleHTMLFormatter(), fragmentScorer);
  }
  
  public Highlighter(Formatter formatter, Scorer fragmentScorer)
  {
    this(formatter, new DefaultEncoder(), fragmentScorer);
  }
  
  public Highlighter(Formatter formatter, Encoder encoder, Scorer fragmentScorer)
  {
    this.formatter = formatter;
    this.encoder = encoder;
    this.fragmentScorer = fragmentScorer;
  }
  
  public final String getBestFragment(Analyzer analyzer, String fieldName, String text)
    throws IOException, InvalidTokenOffsetsException
  {
    TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text));
    return getBestFragment(tokenStream, text);
  }
  
  public final String getBestFragment(TokenStream tokenStream, String text)
    throws IOException, InvalidTokenOffsetsException
  {
    String[] results = getBestFragments(tokenStream, text, 1);
    if (results.length > 0) {
      return results[0];
    }
    return null;
  }
  
  /**
   * @deprecated
   */
  public final String[] getBestFragments(Analyzer analyzer, String text, int maxNumFragments)
    throws IOException, InvalidTokenOffsetsException
  {
    TokenStream tokenStream = analyzer.tokenStream("field", new StringReader(text));
    return getBestFragments(tokenStream, text, maxNumFragments);
  }
  
  public final String[] getBestFragments(Analyzer analyzer, String fieldName, String text, int maxNumFragments)
    throws IOException, InvalidTokenOffsetsException
  {
    TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text));
    return getBestFragments(tokenStream, text, maxNumFragments);
  }
  
  public final String[] getBestFragments(TokenStream tokenStream, String text, int maxNumFragments)
    throws IOException, InvalidTokenOffsetsException
  {
    maxNumFragments = Math.max(1, maxNumFragments);
    
    TextFragment[] frag = getBestTextFragments(tokenStream, text, true, maxNumFragments);
    
    ArrayList fragTexts = new ArrayList();
    for (int i = 0; i < frag.length; i++) {
      if ((frag[i] != null) && (frag[i].getScore() > 0.0F)) {
        fragTexts.add(frag[i].toString());
      }
    }
    return (String[])fragTexts.toArray(new String[0]);
  }
  
  public final TextFragment[] getBestTextFragments(TokenStream tokenStream, String text, boolean mergeContiguousFragments, int maxNumFragments)
    throws IOException, InvalidTokenOffsetsException
  {
    ArrayList docFrags = new ArrayList();
    StringBuffer newText = new StringBuffer();
    
    TermAttribute termAtt = (TermAttribute)tokenStream.addAttribute(TermAttribute.class);
    OffsetAttribute offsetAtt = (OffsetAttribute)tokenStream.addAttribute(OffsetAttribute.class);
    tokenStream.addAttribute(PositionIncrementAttribute.class);
    tokenStream.reset();
    
    TextFragment currentFrag = new TextFragment(newText, newText.length(), docFrags.size());
    TokenStream newStream = fragmentScorer.init(tokenStream);
    if (newStream != null) {
      tokenStream = newStream;
    }
    fragmentScorer.startFragment(currentFrag);
    docFrags.add(currentFrag);
    
    FragmentQueue fragQueue = new FragmentQueue(maxNumFragments);
    try
    {
      int lastEndOffset = 0;
      textFragmenter.start(text, tokenStream);
      
      TokenGroup tokenGroup = new TokenGroup(tokenStream);
      for (boolean next = tokenStream.incrementToken(); (next) && (offsetAtt.startOffset() < maxDocCharsToAnalyze); next = tokenStream.incrementToken())
      {
        if ((offsetAtt.endOffset() > text.length()) || (offsetAtt.startOffset() > text.length())) {
          throw new InvalidTokenOffsetsException("Token " + termAtt.term() + " exceeds length of provided text sized " + text.length());
        }
        if ((tokenGroup.numTokens > 0) && (tokenGroup.isDistinct()))
        {
          int startOffset = tokenGroup.matchStartOffset;
          int endOffset = tokenGroup.matchEndOffset;
          String tokenText = text.substring(startOffset, endOffset);
          String markedUpText = formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
          if (startOffset > lastEndOffset) {
            newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
          }
          newText.append(markedUpText);
          lastEndOffset = Math.max(endOffset, lastEndOffset);
          tokenGroup.clear();
          if (textFragmenter.isNewFragment())
          {
            currentFrag.setScore(fragmentScorer.getFragmentScore());
            
            currentFrag.textEndPos = newText.length();
            currentFrag = new TextFragment(newText, newText.length(), docFrags.size());
            fragmentScorer.startFragment(currentFrag);
            docFrags.add(currentFrag);
          }
        }
        tokenGroup.addToken(fragmentScorer.getTokenScore());
      }
      currentFrag.setScore(fragmentScorer.getFragmentScore());
      if (tokenGroup.numTokens > 0)
      {
        int startOffset = tokenGroup.matchStartOffset;
        int endOffset = tokenGroup.matchEndOffset;
        String tokenText = text.substring(startOffset, endOffset);
        String markedUpText = formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
        if (startOffset > lastEndOffset) {
          newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
        }
        newText.append(markedUpText);
        lastEndOffset = Math.max(lastEndOffset, endOffset);
      }
      if ((lastEndOffset < text.length()) && (text.length() <= maxDocCharsToAnalyze)) {
        newText.append(encoder.encodeText(text.substring(lastEndOffset)));
      }
      currentFrag.textEndPos = newText.length();
      for (Iterator i = docFrags.iterator(); i.hasNext();)
      {
        currentFrag = (TextFragment)i.next();
        
        fragQueue.insertWithOverflow(currentFrag);
      }
      TextFragment[] frag = new TextFragment[fragQueue.size()];
      for (int i = frag.length - 1; i >= 0; i--) {
        frag[i] = ((TextFragment)fragQueue.pop());
      }
      ArrayList fragTexts;
      if (mergeContiguousFragments)
      {
        mergeContiguousFragments(frag);
        fragTexts = new ArrayList();
        for (int i = 0; i < frag.length; i++) {
          if ((frag[i] != null) && (frag[i].getScore() > 0.0F)) {
            fragTexts.add(frag[i]);
          }
        }
        frag = (TextFragment[])fragTexts.toArray(new TextFragment[0]);
      }
      return frag;
    }
    finally
    {
      if (tokenStream != null) {
        try
        {
          tokenStream.close();
        }
        catch (Exception e) {}
      }
    }
  }
  
  private void mergeContiguousFragments(TextFragment[] frag)
  {
    if (frag.length > 1)
    {
      boolean mergingStillBeingDone;
      do
      {
        mergingStillBeingDone = false;
        for (int i = 0; i < frag.length; i++) {
          if (frag[i] != null) {
            for (int x = 0; x < frag.length; x++) {
              if (frag[x] != null)
              {
                if (frag[i] == null) {
                  break;
                }
                TextFragment frag1 = null;
                TextFragment frag2 = null;
                int frag1Num = 0;
                int frag2Num = 0;
                if (frag[i].follows(frag[x]))
                {
                  frag1 = frag[x];
                  frag1Num = x;
                  frag2 = frag[i];
                  frag2Num = i;
                }
                else if (frag[x].follows(frag[i]))
                {
                  frag1 = frag[i];
                  frag1Num = i;
                  frag2 = frag[x];
                  frag2Num = x;
                }
                if (frag1 != null)
                {
                  int bestScoringFragNum;
                  int worstScoringFragNum;
                  if (frag1.getScore() > frag2.getScore())
                  {
                    bestScoringFragNum = frag1Num;
                    worstScoringFragNum = frag2Num;
                  }
                  else
                  {
                    bestScoringFragNum = frag2Num;
                    worstScoringFragNum = frag1Num;
                  }
                  frag1.merge(frag2);
                  frag[worstScoringFragNum] = null;
                  mergingStillBeingDone = true;
                  frag[bestScoringFragNum] = frag1;
                }
              }
            }
          }
        }
      } while (mergingStillBeingDone);
    }
  }
  
  public final String getBestFragments(TokenStream tokenStream, String text, int maxNumFragments, String separator)
    throws IOException, InvalidTokenOffsetsException
  {
    String[] sections = getBestFragments(tokenStream, text, maxNumFragments);
    StringBuffer result = new StringBuffer();
    for (int i = 0; i < sections.length; i++)
    {
      if (i > 0) {
        result.append(separator);
      }
      result.append(sections[i]);
    }
    return result.toString();
  }
  
  /**
   * @deprecated
   */
  public int getMaxDocBytesToAnalyze()
  {
    return maxDocCharsToAnalyze;
  }
  
  /**
   * @deprecated
   */
  public void setMaxDocBytesToAnalyze(int byteCount)
  {
    maxDocCharsToAnalyze = byteCount;
  }
  
  public int getMaxDocCharsToAnalyze()
  {
    return maxDocCharsToAnalyze;
  }
  
  public void setMaxDocCharsToAnalyze(int maxDocCharsToAnalyze)
  {
    this.maxDocCharsToAnalyze = maxDocCharsToAnalyze;
  }
  
  public Fragmenter getTextFragmenter()
  {
    return textFragmenter;
  }
  
  public void setTextFragmenter(Fragmenter fragmenter)
  {
    textFragmenter = fragmenter;
  }
  
  public Scorer getFragmentScorer()
  {
    return fragmentScorer;
  }
  
  public void setFragmentScorer(Scorer scorer)
  {
    fragmentScorer = scorer;
  }
  
  public Encoder getEncoder()
  {
    return encoder;
  }
  
  public void setEncoder(Encoder encoder)
  {
    this.encoder = encoder;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.Highlighter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
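
A self-contained usage sketch for the class above (not part of the decompiled jar). It assumes Lucene 2.9 on the classpath; the field name "contents" and the sample text are placeholders.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.util.Version;

public class HighlightExample
{
  public static void main(String[] args) throws Exception
  {
    String text = "Lucene highlights the query terms it finds in the stored text.";
    Query query = new TermQuery(new Term("contents", "lucene"));   // placeholder query

    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_29);
    QueryScorer scorer = new QueryScorer(query, "contents");
    Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(), scorer);

    // Re-analyzes 'text' and returns up to two of the best-scoring fragments.
    String[] fragments = highlighter.getBestFragments(analyzer, "contents", text, 2);
    for (int i = 0; i < fragments.length; i++) {
      System.out.println(fragments[i]);   // e.g. "<B>Lucene</B> highlights the query terms ..."
    }
  }
}
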
package org.apache.lucene.search.highlight;

public class InvalidTokenOffsetsException
  extends Exception
{
  public InvalidTokenOffsetsException(String message)
  {
    super(message);
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.InvalidTokenOffsetsException
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

import org.apache.lucene.analysis.TokenStream;

public class NullFragmenter
  implements Fragmenter
{
  public void start(String s, TokenStream tokenStream) {}
  
  public boolean isNewFragment()
  {
    return false;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.NullFragmenter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

class PositionSpan
{
  int start;
  int end;
  
  public PositionSpan(int start, int end)
  {
    this.start = start;
    this.end = end;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.PositionSpan
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.StringHelper;

public class QueryScorer
  implements Scorer
{
  private float totalScore;
  private Set foundTerms;
  private Map fieldWeightedSpanTerms;
  private float maxTermWeight;
  private int position = -1;
  private String defaultField;
  private TermAttribute termAtt;
  private PositionIncrementAttribute posIncAtt;
  private boolean expandMultiTermQuery = true;
  private Query query;
  private String field;
  private IndexReader reader;
  private boolean skipInitExtractor;
  private boolean wrapToCaching = true;
  
  public QueryScorer(Query query)
  {
    init(query, null, null, true);
  }
  
  public QueryScorer(Query query, String field)
  {
    init(query, field, null, true);
  }
  
  public QueryScorer(Query query, IndexReader reader, String field)
  {
    init(query, field, reader, true);
  }
  
  public QueryScorer(Query query, IndexReader reader, String field, String defaultField)
  {
    this.defaultField = StringHelper.intern(defaultField);
    init(query, field, reader, true);
  }
  
  public QueryScorer(Query query, String field, String defaultField)
  {
    this.defaultField = StringHelper.intern(defaultField);
    init(query, field, null, true);
  }
  
  public QueryScorer(WeightedSpanTerm[] weightedTerms)
  {
    fieldWeightedSpanTerms = new HashMap(weightedTerms.length);
    for (int i = 0; i < weightedTerms.length; i++)
    {
      WeightedSpanTerm existingTerm = (WeightedSpanTerm)fieldWeightedSpanTerms.get(weightedTerms[i].getTerm());
      if ((existingTerm == null) || (existingTerm.getWeight() < weightedTerms[i].getWeight()))
      {
        fieldWeightedSpanTerms.put(weightedTerms[i].getTerm(), weightedTerms[i]);
        maxTermWeight = Math.max(maxTermWeight, weightedTerms[i].getWeight());
      }
    }
    skipInitExtractor = true;
  }
  
  public float getFragmentScore()
  {
    return totalScore;
  }
  
  public float getMaxTermWeight()
  {
    return maxTermWeight;
  }
  
  public float getTokenScore()
  {
    position += posIncAtt.getPositionIncrement();
    String termText = termAtt.term();
    WeightedSpanTerm weightedSpanTerm;
    if ((weightedSpanTerm = (WeightedSpanTerm)fieldWeightedSpanTerms.get(termText)) == null) {
      return 0.0F;
    }
    if ((weightedSpanTerm.isPositionSensitive()) && (!weightedSpanTerm.checkPosition(position))) {
      return 0.0F;
    }
    float score = weightedSpanTerm.getWeight();
    if (!foundTerms.contains(termText))
    {
      totalScore += score;
      foundTerms.add(termText);
    }
    return score;
  }
  
  public TokenStream init(TokenStream tokenStream)
    throws IOException
  {
    position = -1;
    termAtt = ((TermAttribute)tokenStream.addAttribute(TermAttribute.class));
    posIncAtt = ((PositionIncrementAttribute)tokenStream.addAttribute(PositionIncrementAttribute.class));
    if (!skipInitExtractor)
    {
      if (fieldWeightedSpanTerms != null) {
        fieldWeightedSpanTerms.clear();
      }
      return initExtractor(tokenStream);
    }
    return null;
  }
  
  public WeightedSpanTerm getWeightedSpanTerm(String token)
  {
    return (WeightedSpanTerm)fieldWeightedSpanTerms.get(token);
  }
  
  private void init(Query query, String field, IndexReader reader, boolean expandMultiTermQuery)
  {
    this.reader = reader;
    this.expandMultiTermQuery = expandMultiTermQuery;
    this.query = query;
    this.field = field;
  }
  
  private TokenStream initExtractor(TokenStream tokenStream)
    throws IOException
  {
    WeightedSpanTermExtractor qse = defaultField == null ? new WeightedSpanTermExtractor() : new WeightedSpanTermExtractor(defaultField);
    
    qse.setExpandMultiTermQuery(expandMultiTermQuery);
    qse.setWrapIfNotCachingTokenFilter(wrapToCaching);
    if (reader == null) {
      fieldWeightedSpanTerms = qse.getWeightedSpanTerms(query, tokenStream, field);
    } else {
      fieldWeightedSpanTerms = qse.getWeightedSpanTermsWithScores(query, tokenStream, field, reader);
    }
    if (qse.isCachedTokenStream()) {
      return qse.getTokenStream();
    }
    return null;
  }
  
  public void startFragment(TextFragment newFragment)
  {
    foundTerms = new HashSet();
    totalScore = 0.0F;
  }
  
  public boolean isExpandMultiTermQuery()
  {
    return expandMultiTermQuery;
  }
  
  public void setExpandMultiTermQuery(boolean expandMultiTermQuery)
  {
    this.expandMultiTermQuery = expandMultiTermQuery;
  }
  
  public void setWrapIfNotCachingTokenFilter(boolean wrap)
  {
    wrapToCaching = wrap;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.QueryScorer
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
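
A sketch (not in the jar) of what distinguishes QueryScorer from the older QueryTermScorer: it extracts WeightedSpanTerms, so phrase and span queries only score terms that occur at the matching positions. 'analyzer' below is a placeholder for the analyzer used at index time.

PhraseQuery phrase = new PhraseQuery();
phrase.add(new Term("contents", "open"));
phrase.add(new Term("contents", "source"));

QueryScorer scorer = new QueryScorer(phrase, "contents");
Highlighter highlighter = new Highlighter(scorer);   // default <B>...</B> markup
String best = highlighter.getBestFragment(analyzer, "contents",
    "open standards and open source software");
// Only the "open source" occurrence should be wrapped in <B> tags, not the earlier "open".
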
package org.apache.lucene.search.highlight;

import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.StringHelper;

public final class QueryTermExtractor
{
  public static final WeightedTerm[] getTerms(Query query)
  {
    return getTerms(query, false);
  }
  
  public static final WeightedTerm[] getIdfWeightedTerms(Query query, IndexReader reader, String fieldName)
  {
    WeightedTerm[] terms = getTerms(query, false, fieldName);
    int totalNumDocs = reader.numDocs();
    for (int i = 0; i < terms.length; i++) {
      try
      {
        int docFreq = reader.docFreq(new Term(fieldName, terms[i].getTerm()));
        if (totalNumDocs < docFreq) {
          docFreq = totalNumDocs;
        }
        float idf = (float)(Math.log((float)totalNumDocs / (docFreq + 1)) + 1.0D);
        terms[i].setWeight(terms[i].getWeight() * idf);
      }
      catch (IOException e) {}
    }
    return terms;
  }
  
  public static final WeightedTerm[] getTerms(Query query, boolean prohibited, String fieldName)
  {
    HashSet terms = new HashSet();
    if (fieldName != null) {
      fieldName = StringHelper.intern(fieldName);
    }
    getTerms(query, terms, prohibited, fieldName);
    return (WeightedTerm[])terms.toArray(new WeightedTerm[0]);
  }
  
  public static final WeightedTerm[] getTerms(Query query, boolean prohibited)
  {
    return getTerms(query, prohibited, null);
  }
  
  private static final void getTerms(Query query, HashSet terms, boolean prohibited, String fieldName)
  {
    Iterator iter;
    try
    {
      if ((query instanceof BooleanQuery))
      {
        getTermsFromBooleanQuery((BooleanQuery)query, terms, prohibited, fieldName);
      }
      else if ((query instanceof FilteredQuery))
      {
        getTermsFromFilteredQuery((FilteredQuery)query, terms, prohibited, fieldName);
      }
      else
      {
        HashSet nonWeightedTerms = new HashSet();
        query.extractTerms(nonWeightedTerms);
        for (iter = nonWeightedTerms.iterator(); iter.hasNext();)
        {
          Term term = (Term)iter.next();
          if ((fieldName == null) || (term.field() == fieldName)) {
            terms.add(new WeightedTerm(query.getBoost(), term.text()));
          }
        }
      }
    }
    catch (UnsupportedOperationException ignore) {}
  }
  
  private static final void getTermsFromBooleanQuery(BooleanQuery query, HashSet terms, boolean prohibited, String fieldName)
  {
    BooleanClause[] queryClauses = query.getClauses();
    for (int i = 0; i < queryClauses.length; i++) {
      if ((prohibited) || (queryClauses[i].getOccur() != BooleanClause.Occur.MUST_NOT)) {
        getTerms(queryClauses[i].getQuery(), terms, prohibited, fieldName);
      }
    }
  }
  
  private static void getTermsFromFilteredQuery(FilteredQuery query, HashSet terms, boolean prohibited, String fieldName)
  {
    getTerms(query.getQuery(), terms, prohibited, fieldName);
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.QueryTermExtractor
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

import java.util.HashMap;
import java.util.HashSet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;

public class QueryTermScorer
  implements Scorer
{
  TextFragment currentTextFragment = null;
  HashSet uniqueTermsInFragment;
  float totalScore = 0.0F;
  float maxTermWeight = 0.0F;
  private HashMap termsToFind;
  private TermAttribute termAtt;
  
  public QueryTermScorer(Query query)
  {
    this(QueryTermExtractor.getTerms(query));
  }
  
  public QueryTermScorer(Query query, String fieldName)
  {
    this(QueryTermExtractor.getTerms(query, false, fieldName));
  }
  
  public QueryTermScorer(Query query, IndexReader reader, String fieldName)
  {
    this(QueryTermExtractor.getIdfWeightedTerms(query, reader, fieldName));
  }
  
  public QueryTermScorer(WeightedTerm[] weightedTerms)
  {
    termsToFind = new HashMap();
    for (int i = 0; i < weightedTerms.length; i++)
    {
      WeightedTerm existingTerm = (WeightedTerm)termsToFind.get(weightedTerms[i].getTerm());
      if ((existingTerm == null) || (existingTerm.getWeight() < weightedTerms[i].getWeight()))
      {
        termsToFind.put(weightedTerms[i].getTerm(), weightedTerms[i]);
        maxTermWeight = Math.max(maxTermWeight, weightedTerms[i].getWeight());
      }
    }
  }
  
  public TokenStream init(TokenStream tokenStream)
  {
    termAtt = ((TermAttribute)tokenStream.addAttribute(TermAttribute.class));
    return null;
  }
  
  public void startFragment(TextFragment newFragment)
  {
    uniqueTermsInFragment = new HashSet();
    currentTextFragment = newFragment;
    totalScore = 0.0F;
  }
  
  public float getTokenScore()
  {
    String termText = termAtt.term();
    
    WeightedTerm queryTerm = (WeightedTerm)termsToFind.get(termText);
    if (queryTerm == null) {
      return 0.0F;
    }
    if (!uniqueTermsInFragment.contains(termText))
    {
      totalScore += queryTerm.getWeight();
      uniqueTermsInFragment.add(termText);
    }
    return queryTerm.getWeight();
  }
  
  public float getFragmentScore()
  {
    return totalScore;
  }
  
  public void allFragmentsProcessed() {}
  
  public float getMaxTermWeight()
  {
    return maxTermWeight;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.QueryTermScorer
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
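
A short usage sketch (not in the jar): QueryTermScorer treats the query as a flat bag of terms, so every occurrence of a query term is highlighted regardless of phrase or span positions; with an IndexReader it can also weight rarer terms higher via getIdfWeightedTerms. 'query' and 'reader' are placeholders.

Highlighter highlighter = new Highlighter(new QueryTermScorer(query, "contents"));
Highlighter idfHighlighter = new Highlighter(new QueryTermScorer(query, reader, "contents"));
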
package org.apache.lucene.search.highlight;

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;

public abstract interface Scorer
{
  public abstract TokenStream init(TokenStream paramTokenStream)
    throws IOException;
  
  public abstract void startFragment(TextFragment paramTextFragment);
  
  public abstract float getTokenScore();
  
  public abstract float getFragmentScore();
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.Scorer
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

public class SimpleFragmenter
  implements Fragmenter
{
  private static final int DEFAULT_FRAGMENT_SIZE = 100;
  private int currentNumFrags;
  private int fragmentSize;
  private OffsetAttribute offsetAtt;
  
  public SimpleFragmenter()
  {
    this(100);
  }
  
  public SimpleFragmenter(int fragmentSize)
  {
    this.fragmentSize = fragmentSize;
  }
  
  public void start(String originalText, TokenStream stream)
  {
    offsetAtt = ((OffsetAttribute)stream.addAttribute(OffsetAttribute.class));
    currentNumFrags = 1;
  }
  
  public boolean isNewFragment()
  {
    boolean isNewFrag = offsetAtt.endOffset() >= fragmentSize * currentNumFrags;
    if (isNewFrag) {
      currentNumFrags += 1;
    }
    return isNewFrag;
  }
  
  public int getFragmentSize()
  {
    return fragmentSize;
  }
  
  public void setFragmentSize(int size)
  {
    fragmentSize = size;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.SimpleFragmenter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

public class SimpleHTMLEncoder
  implements Encoder
{
  public String encodeText(String originalText)
  {
    return htmlEncode(originalText);
  }
  
  public static final String htmlEncode(String plainText)
  {
    if ((plainText == null) || (plainText.length() == 0)) {
      return "";
    }
    StringBuffer result = new StringBuffer(plainText.length());
    for (int index = 0; index < plainText.length(); index++)
    {
      char ch = plainText.charAt(index);
      switch (ch)
      {
      case '"': 
        result.append("&quot;");
        break;
      case '&': 
        result.append("&amp;");
        break;
      case '<': 
        result.append("&lt;");
        break;
      case '>': 
        result.append("&gt;");
        break;
      default: 
        if (ch < 128) {
          result.append(ch);
        } else {
          result.append("&#").append((int)ch).append(";");
        }
        }
        break;
      }
    }
    return result.toString();
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.SimpleHTMLEncoder
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
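
A usage sketch (not in the jar): plugging SimpleHTMLEncoder into the Highlighter keeps markup already present in the stored text from leaking into the generated HTML. 'formatter' and 'scorer' are placeholders.

Highlighter highlighter = new Highlighter(formatter, new SimpleHTMLEncoder(), scorer);

// The encoder can also be used on its own:
String safe = SimpleHTMLEncoder.htmlEncode("AT&T <b>bold</b>");
// -> "AT&amp;T &lt;b&gt;bold&lt;/b&gt;"
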
package org.apache.lucene.search.highlight;

public class SimpleHTMLFormatter
  implements Formatter
{
  private static final String DEFAULT_PRE_TAG = "<B>";
  private static final String DEFAULT_POST_TAG = "</B>";
  private String preTag;
  private String postTag;
  
  public SimpleHTMLFormatter(String preTag, String postTag)
  {
    this.preTag = preTag;
    this.postTag = postTag;
  }
  
  public SimpleHTMLFormatter()
  {
    this("<B>", "</B>");
  }
  
  public String highlightTerm(String originalText, TokenGroup tokenGroup)
  {
    if (tokenGroup.getTotalScore() <= 0.0F) {
      return originalText;
    }
    StringBuffer returnBuffer = new StringBuffer(preTag.length() + originalText.length() + postTag.length());
    returnBuffer.append(preTag);
    returnBuffer.append(originalText);
    returnBuffer.append(postTag);
    return returnBuffer.toString();
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.SimpleHTMLFormatter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
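
A usage sketch (not in the jar): the two-argument constructor swaps the default <B>...</B> markup for any pre/post tag pair, for example a CSS class the page can style. 'scorer' is a placeholder.

Formatter formatter = new SimpleHTMLFormatter("<span class=\"hit\">", "</span>");
Highlighter highlighter = new Highlighter(formatter, scorer);
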
package org.apache.lucene.search.highlight;

import java.util.List;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

public class SimpleSpanFragmenter
  implements Fragmenter
{
  private static final int DEFAULT_FRAGMENT_SIZE = 100;
  private int fragmentSize;
  private int currentNumFrags;
  private int position = -1;
  private QueryScorer queryScorer;
  private int waitForPos = -1;
  private int textSize;
  private TermAttribute termAtt;
  private PositionIncrementAttribute posIncAtt;
  private OffsetAttribute offsetAtt;
  
  public SimpleSpanFragmenter(QueryScorer queryScorer)
  {
    this(queryScorer, 100);
  }
  
  public SimpleSpanFragmenter(QueryScorer queryScorer, int fragmentSize)
  {
    this.fragmentSize = fragmentSize;
    this.queryScorer = queryScorer;
  }
  
  public boolean isNewFragment()
  {
    position += posIncAtt.getPositionIncrement();
    if (waitForPos == position) {
      waitForPos = -1;
    } else if (waitForPos != -1) {
      return false;
    }
    WeightedSpanTerm wSpanTerm = queryScorer.getWeightedSpanTerm(termAtt.term());
    if (wSpanTerm != null)
    {
      List positionSpans = wSpanTerm.getPositionSpans();
      for (int i = 0; i < positionSpans.size(); i++) {
        if (((PositionSpan)positionSpans.get(i)).start == position)
        {
          waitForPos = (((PositionSpan)positionSpans.get(i)).end + 1);
          break;
        }
      }
    }
    boolean isNewFrag = (offsetAtt.endOffset() >= fragmentSize * currentNumFrags) && (textSize - offsetAtt.endOffset() >= fragmentSize >>> 1);
    if (isNewFrag) {
      currentNumFrags += 1;
    }
    return isNewFrag;
  }
  
  public void start(String originalText, TokenStream tokenStream)
  {
    position = -1;
    currentNumFrags = 1;
    textSize = originalText.length();
    termAtt = ((TermAttribute)tokenStream.addAttribute(TermAttribute.class));
    posIncAtt = ((PositionIncrementAttribute)tokenStream.addAttribute(PositionIncrementAttribute.class));
    offsetAtt = ((OffsetAttribute)tokenStream.addAttribute(OffsetAttribute.class));
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.SimpleSpanFragmenter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
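
A usage sketch (not in the jar): SimpleSpanFragmenter should be given the same QueryScorer that drives the Highlighter, so that a fragment boundary is not placed inside a span the query matched. 'query' is a placeholder.

QueryScorer scorer = new QueryScorer(query, "contents");
Highlighter highlighter = new Highlighter(scorer);
highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 120));   // fragments of roughly 120 characters
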
package org.apache.lucene.search.highlight;

public class SpanGradientFormatter
  extends GradientFormatter
{
  private static final String TEMPLATE = "<span style=\"background: #EEEEEE; color: #000000;\">...</span>";
  
  public SpanGradientFormatter(float maxScore, String minForegroundColor, String maxForegroundColor, String minBackgroundColor, String maxBackgroundColor)
  {
    super(maxScore, minForegroundColor, maxForegroundColor, minBackgroundColor, maxBackgroundColor);
  }
  
  public String highlightTerm(String originalText, TokenGroup tokenGroup)
  {
    if (tokenGroup.getTotalScore() == 0.0F) {
      return originalText;
    }
    float score = tokenGroup.getTotalScore();
    if (score == 0.0F) {
      return originalText;
    }
    StringBuffer sb = new StringBuffer(originalText.length() + EXTRA);
    
    sb.append("<span style=\"");
    if (highlightForeground)
    {
      sb.append("color: ");
      sb.append(getForegroundColorString(score));
      sb.append("; ");
    }
    if (highlightBackground)
    {
      sb.append("background: ");
      sb.append(getBackgroundColorString(score));
      sb.append("; ");
    }
    sb.append("\">");
    sb.append(originalText);
    sb.append("</span>");
    return sb.toString();
  }
  
  private static final int EXTRA = TEMPLATE.length();
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.SpanGradientFormatter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

public class TextFragment
{
  StringBuffer markedUpText;
  int fragNum;
  int textStartPos;
  int textEndPos;
  float score;
  
  public TextFragment(StringBuffer markedUpText, int textStartPos, int fragNum)
  {
    this.markedUpText = markedUpText;
    this.textStartPos = textStartPos;
    this.fragNum = fragNum;
  }
  
  void setScore(float score)
  {
    this.score = score;
  }
  
  public float getScore()
  {
    return score;
  }
  
  public void merge(TextFragment frag2)
  {
    textEndPos = frag2.textEndPos;
    score = Math.max(score, frag2.score);
  }
  
  public boolean follows(TextFragment fragment)
  {
    return textStartPos == fragment.textEndPos;
  }
  
  public int getFragNum()
  {
    return fragNum;
  }
  
  public String toString()
  {
    return markedUpText.substring(textStartPos, textEndPos);
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.search.highlight.TextFragment
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.search.highlight;

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

public class TokenGroup
{
  private static final int MAX_NUM_TOKENS_PER_GROUP = 50;
  Token[] tokens = new Token[50];
  float[] scores = new float[50];
  int numTokens = 0;
  int startOffset = 0;
  int endOffset = 0;
  float tot;
  int matchStartOffset;
  int matchEndOffset;
  private OffsetAttribute offsetAtt;
  private TermAttribute termAtt;
  
  public TokenGroup(TokenStream tokenStream)
  {
    offsetAtt = ((OffsetAttribute)tokenStream.addAttribute(OffsetAttribute.class));
    termAtt = ((TermAttribute)tokenStream.addAttribute(TermAttribute.class));
  }
  
  void addToken(float score)
  {
    if (numTokens < 50)
    {
      int termStartOffset = offsetAtt.startOffset();
      int termEndOffset = offsetAtt.endOffset();
      if (numTokens == 0)
      {
        startOffset = (matchStartOffset = termStartOffset);
        endOffset = (matchEndOffset = termEndOffset);
        tot += score;
      }
      else
      {
        startOffset = Math.min(startOffset, termStartOffset);
        endOffset = Math.max(endOffset, termEndOffset);
        if (score > 0.0F)
        {
          if (tot == 0.0F)
          {
            matchStartOffset = termStartOffset;
            matchEndOffset = termEndOffset;
          }
          else
          {
            matchStartOffset = Math.min(matchStartOffset, termStartOffset);
            matchEndOffset = Math.max(matchEndOffset, termEndOffset);
          }
          tot += score;
        }
      }
      Token token = new Token(termStartOffset, termEndOffset);
      token.setTermBuffer(termAtt.term());
      tokens[numTokens] = token;
      scores[numTokens] = score;
      numTokens += 1;
    }
  }
  
  boolean isDistinct()
  {
    return offsetAtt.startOffset() >= endOffset;
  }
  
  void clear()
  {
    numTokens = 0;
    tot = 0.0F;
  }
  
  public float getTotalScore()
  {
    return tot;
  }
}