Location: ${solr.home}\example\techproducts\solr\techproducts\conf\solrconfig.xml
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

<!--
     For more configuration options that may appear in this file, see
     http://wiki.apache.org/solr/SolrConfigXml.
-->
<config>
  <!-- In all of the configuration below, a prefix of "solr." on class
       names refers to the corresponding packages, e.g.
       org.apache.solr.(search|update|request|core|analysis). For your
       own custom plugins you must specify the fully qualified name
       (package + class).
  -->

  <!-- Controls what version of Lucene the various components of Solr
       adhere to. The latest version fixes known bugs and brings
       improvements, so you generally want to use the latest value.
       If you change this value, it is strongly recommended that you
       fully re-index.
  -->
  <luceneMatchVersion>6.5.0</luceneMatchVersion>

  <!-- <lib/> directives tell Solr which jars to load in order to
       resolve the plugins you specify in solrconfig.xml or schema.xml
       (e.g. Analyzers, Request Handlers, etc...). All directories are
       relative to the instanceDir. <lib/> directives are processed in
       the order they appear in solrconfig.xml, so if a plugin jar
       depends on other jars, those dependency jars must be listed
       first. If a "./lib" directory exists in the instanceDir, the
       following directive makes every file in it get loaded:

         <lib dir="./lib" />
  -->

  <!-- 'dir' adds every jar in the given directory to the classpath;
       with 'regex', only jars matching the regular expression are
       loaded. The examples below load some solr-contribs along with
       their external dependencies.
  -->
<lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" /> <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" /> <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" /> <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" /> <!-- 'path' 用于指定单独的jar包。如果此jar包不能被加载,则会被记录为严重错误。 -->
  <!--
     <lib path="../a-jar-that-does-not-exist.jar" />
  -->

  <!-- Specifies the directory that holds the index data files. The
       default is the "data" directory under the Solr home. If
       replication is in use, this should match the replication
       configuration.
  -->
  <dataDir>${solr.data.dir:}</dataDir>

  <!-- The DirectoryFactory to use for indexes.

       solr.StandardDirectoryFactory is filesystem based and tries to
       pick the best implementation for the current JVM and platform.

       solr.NRTCachingDirectoryFactory, the default, wraps
       solr.StandardDirectoryFactory and caches small files in memory
       for better NRT (Near-Real-Time) performance.

       Specific implementations can be forced via
       solr.MMapDirectoryFactory, solr.NIOFSDirectoryFactory, or
       solr.SimpleFSDirectoryFactory.

       solr.RAMDirectoryFactory is memory based, not persistent, and
       does not work with replication. (Personally I consider it
       suitable only for development and testing.)
  -->
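  <!-- A minimal sketch (the start command form is an assumption about
       how this node is launched): since the declaration below resolves
       its class from the solr.directoryFactory system property, a
       specific implementation can be forced at startup, e.g.

         bin/solr start -Dsolr.directoryFactory=solr.MMapDirectoryFactory
  -->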
  <directoryFactory name="DirectoryFactory"
                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>

  <!-- The CodecFactory defines the format of the inverted index. The
       default implementation is SchemaCodecFactory, which uses Lucene's
       official index format and additionally supports per-field
       customization via the (postingsFormat/docValuesFormat) attributes.
       Note that most of the alternative implementations are
       experimental, so if you customize the index format, convert back
       to the official format (e.g. via
       IndexWriter.addIndexes(IndexReader)) before upgrading to avoid an
       unnecessary full re-index.
       A "compressionMode" attribute can be added to <codecFactory>,
       with a value of either "BEST_SPEED" (default) or
       "BEST_COMPRESSION".
  -->
  <codecFactory class="solr.SchemaCodecFactory"/>

  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
       Index Config - these settings control the low-level behavior of
       indexing. Most of the example settings here show the default
       values, so they are commented out to make it easier to see what
       has been customized.
       Note: this replaces <indexDefaults> and <mainIndex> from older
       versions.
       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<indexConfig>
    <!-- maxFieldLength was removed in 4.0. To get similar behavior,
         include a LimitTokenCountFilterFactory in your fieldType
         definition. E.g.
         <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
    -->

    <!-- Maximum time (ms) the IndexWriter waits for a write lock.
         Default: 1000 -->
    <!-- <writeLockTimeout>1000</writeLockTimeout> -->

    <!-- Enabling compound files reduces the number of index data files
         (at the cost of some performance). The default in Lucene is
         "true"; in Solr it is "false" (since 3.6). -->
    <!-- <useCompoundFile>false</useCompoundFile> -->

    <!-- ramBufferSizeMB sets the amount of RAM Lucene may use to buffer
         added documents and deletions before flushing. Default: 100MB.
         maxBufferedDocs limits the number of buffered documents. If
         both are set, a flush happens whenever either limit is reached. -->
    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->

    <!-- Merge Policy - controls how segments are merged.
         Since Solr/Lucene 3.3 the default is TieredMergePolicy;
         in Lucene 2.3 and earlier it was LogByteSizeMergePolicy.
    -->
<!--
<mergePolicyFactory class="org.apache.solr.index.TieredMergePolicyFactory">
<int name="maxMergeAtOnce">10</int>
<int name="segmentsPerTier">10</int>
<double name="noCFSRatio">0.1</double>
</mergePolicyFactory>
    -->

    <!-- Merge Scheduler - controls how merges are performed.
         The ConcurrentMergeScheduler (Lucene 2.3 default) can perform
         merges in the background using separate threads; the
         SerialMergeScheduler (Lucene 2.2 default) cannot.
    -->
    <!--
       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
    -->

    <!-- LockFactory

         The values below name different implementations:
           single = SingleInstanceLockFactory - suggested for read-only
                    indexes or when no other process will modify the index
           native = NativeFSLockFactory - uses OS native file locking.
                    Do not use when multiple Solr webapps in the same
                    JVM share a single index.
           simple = SimpleFSLockFactory - uses a plain file for locking

         Since Solr 3.6 the default is 'native'; before that it was
         'simple'. More details at
         http://wiki.apache.org/lucene-java/AvailableLockFactories
    -->
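    <!-- A minimal sketch (the start command form is an assumption about
         how this node is launched): the value below falls back to the
         solr.lock.type system property, so the lock implementation can
         be switched at startup, e.g.

           bin/solr start -Dsolr.lock.type=simple
    -->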
    <lockType>${solr.lock.type:native}</lockType>

    <!-- Commit Deletion Policy

         A custom deletion policy can be specified here. The custom class
         must implement org.apache.lucene.index.IndexDeletionPolicy.
         Solr's default IndexDeletionPolicy implementation supports
         deleting index commit points based on number of commits, age of
         commit point and optimized status.
         In any case, the latest commit point is always preserved.
    -->
<!--
<deletionPolicy class="solr.SolrDeletionPolicy">
-->
<!-- The number of commit points to be kept -->
<!-- <str name="maxCommitsToKeep">1</str> -->
<!-- The number of optimized commit points to be kept -->
<!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
        <!-- All commit points will be deleted once they reach the age given below -->
<!--
<str name="maxCommitAge">30MINUTES</str>
<str name="maxCommitAge">1DAY</str>
-->
<!--
</deletionPolicy>
    -->

    <!-- Lucene Infostream

         To aid in advanced debugging, Lucene provides detailed
         information while indexing.
         Setting this to true writes the underlying Lucene IndexWriter's
         info stream to Solr's log. It is enabled (true) here by default
         and can be controlled via log4j.properties.
    -->
    <infoStream>true</infoStream>
  </indexConfig>

  <!-- JMX

       This example enables JMX if and only if an existing MBeanServer
is found, use this if you want to configure JMX through JVM
parameters. Remove this to disable exposing Solr configuration
and statistics to JMX. For more details see http://wiki.apache.org/solr/SolrJmx
-->
<jmx />
<!-- If you want to connect to a particular server, specify the
agentId
-->
<!-- <jmx agentId="myAgent" /> -->
<!-- If you want to start a new MBeanServer, specify the serviceUrl -->
<!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
    -->

  <!-- The default high-performance update handler -->
  <updateHandler class="solr.DirectUpdateHandler2">

    <!-- Enables a transaction log, used for real-time get, durability,
         and SolrCloud replica recovery. The log can grow as big as the
         uncommitted changes to the index, so a hard autoCommit is
         recommended (see below).
         "dir" - the directory for transaction logs; defaults to the
                 Solr data directory.
"numVersionBuckets" - sets the number of buckets used to keep
track of max version values when checking for re-ordered
updates; increase this value to reduce the cost of
synchronizing access to version buckets during high-volume
indexing, this requires 8 bytes (long) * numVersionBuckets
of heap space per Solr core.
-->
<updateLog>
<str name="dir">${solr.ulog.dir:}</str>
<int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
    </updateLog>

    <!-- AutoCommit

         Perform a hard commit automatically when certain conditions are
         met. Instead of autoCommit, consider using "commitWithin" when
         adding documents.
         http://wiki.apache.org/solr/UpdateXmlMessages

         maxDocs - maximum number of documents to add before triggering
                   a new commit.
         maxTime - maximum amount of time (ms) allowed to pass before
                   triggering a new commit.
         openSearcher - if false, the commit flushes recent index changes
                   to stable storage, but the changes are not made
                   visible to a new searcher.

         If the updateLog is enabled, it is strongly recommended to use
         some sort of autoCommit to limit the log size.
    -->
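    <!-- A minimal sketch (not one of the defaults shipped here): an
         autoCommit that also flushes after a hypothetical batch of
         10,000 added documents, whichever of the two limits is reached
         first. The 10000 value is illustrative only.

         <autoCommit>
           <maxDocs>10000</maxDocs>
           <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
           <openSearcher>false</openSearcher>
         </autoCommit>
    -->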
<autoCommit>
<maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
<openSearcher>false</openSearcher>
    </autoCommit>

    <!-- softAutoCommit is like autoCommit except it causes a 'soft'
         commit, which only guarantees that changes become visible but
         does not guarantee that data is synced to disk. This is faster
         than a hard commit and closer to real-time search.
    -->
    <autoSoftCommit>
      <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
    </autoSoftCommit>

    <!-- Update Related Event Listeners
         postCommit - fired after every commit or optimize command
         postOptimize - fired after every optimize command
    -->
    <!-- The RunExecutableListener executes an external command from a
         hook such as postCommit or postOptimize.

         exe - the name of the executable to run
         dir - the current working directory (default=".")
         wait - whether the calling thread waits until the executable
                returns (default="true")
         args - the arguments to pass to the program. (default is none)
         env - environment variables to set. (default is none)
    -->
    <!-- For details on using RunExecutableListener, see:
         http://wiki.apache.org/solr/CollectionDistribution
    -->
<!--
<listener event="postCommit" class="solr.RunExecutableListener">
<str name="exe">solr/bin/snapshooter</str>
<str name="dir">.</str>
<bool name="wait">true</bool>
<arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
<arr name="env"> <str>MYVAR=val1</str> </arr>
</listener>
    -->

  </updateHandler>

  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
       Query section - these settings control query-time behavior
       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<query>
    <!-- Max Boolean Clauses

         Maximum number of clauses in a BooleanQuery.

         ** WARNING **

         This option modifies a global Lucene property and therefore
         affects all SolrCores. If multiple solrconfig.xml files set
         different values, the value of the last SolrCore to be
         initialized wins.
    -->
    <maxBooleanClauses>1024</maxBooleanClauses>

    <!-- Slow Query Threshold (in millis)

         At high request rates, logging every request can become a
         bottleneck, so INFO logging is often turned off. We can instead
         set a latency threshold above which a request is considered
         "slow" and logged at WARN level, making slow queries easy to
         spot.
    -->
    <slowQueryThresholdMillis>-1</slowQueryThresholdMillis>

    <!-- Solr internal query caches

         There are two cache implementations:
           LRUCache - based on a synchronized LinkedHashMap
           FastLRUCache - based on a ConcurrentHashMap. In single-threaded
             use, FastLRUCache has faster gets and slower puts, so it is
             generally faster than LRUCache when the hit ratio is high
             (> 75%), and it may also be a bit faster on multi-CPU systems.
    -->

    <!-- Filter Cache

         Caches unordered result sets of SolrIndexSearcher queries
         (filters). When a new searcher is opened, values cached by the
         old searcher may be reused to warm it.

         Parameters:
           class - the SolrCache implementation (LRUCache or FastLRUCache)
           size - the maximum number of entries in the cache
           initialSize - the initial capacity (number of entries)
           autowarmCount - the number of entries to prepopulate from
                           the old cache
    -->
    <filterCache class="solr.FastLRUCache"
                 size="512"
                 initialSize="512"
                 autowarmCount="0"/>

    <!-- Query Result Cache

         Caches the sorted results of queries.
         maxRamMB - the maximum amount of RAM (MB) the cache may occupy
    -->
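    <!-- A minimal sketch (not part of this configset's defaults): the
         cache can be bounded by memory instead of entry count via the
         maxRamMB attribute described above; the value 32 is purely
         illustrative.

         <queryResultCache class="solr.LRUCache"
                           maxRamMB="32"
                           autowarmCount="0"/>
    -->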
<queryResultCache class="solr.LRUCache"
size="512"
initialSize="512"
autowarmCount="0"/> <!-- Document Cache 缓存文档对象 Since Lucene internal document ids are transient,
this cache will not be autowarmed.
-->
<documentCache class="solr.LRUCache"
size="512"
initialSize="512"
autowarmCount="0"/> <!-- custom cache currently used by block join -->
<cache name="perSegFilter"
class="solr.search.LRUCache"
size="10"
initialSize="0"
autowarmCount="10"
regenerator="solr.NoOpRegenerator" /> <!-- Field Value Cache 字段值缓存
如果此处不设置的话,依然会创建默认的fieldValueCache
-->
<!--
<fieldValueCache class="solr.FastLRUCache"
size="512"
autowarmCount="128"
showItems="32" />
      -->

    <!-- Custom Cache

         Example of a generic cache. These caches may be accessed by
name through SolrIndexSearcher.getCache(),cacheLookup(), and
cacheInsert(). The purpose is to enable easy caching of
user/application level data. The regenerator argument should
be specified as an implementation of solr.CacheRegenerator
if autowarming is desired.
-->
<!--
<cache name="myUserCache"
class="solr.LRUCache"
size="4096"
initialSize="1024"
autowarmCount="1024"
regenerator="com.mycompany.MyRegenerator"
/>
      -->

    <!-- Lazy Field Loading

         If true, stored fields are only loaded when they are needed.
         This can result in a significant speed improvement if the usual
         case is not to load all stored fields, especially with large
         compressed text fields.
    -->
    <enableLazyFieldLoading>true</enableLazyFieldLoading>

    <!-- Use Filter For Sorted Query

         A possible optimization that attempts to use a filter to
satisfy a search. If the requested sort does not include
score, then the filterCache will be checked for a filter
matching the query. If found, the filter will be used as the
source of document ids, and then the sort will be applied to
         that.

         This setting is only useful when the same query is run
         frequently with different sort options and none of them sorts
         on "score"; in most situations it is rarely needed.
    -->
    <!--
       <useFilterForSortedQuery>true</useFilterForSortedQuery>
    -->

    <!-- Result Window Size

         An optimization for the queryResultCache: more results than the
         request asks for are collected and cached. (Personally I see
         this as a paging optimization: if each page needs 10 rows and
         this is set to 30, the first query fetches 30 ids, so moving to
         page two is served straight from the cached result set instead
         of re-querying.)
    -->
    <queryResultWindowSize>30</queryResultWindowSize>

    <!-- Maximum number of documents to cache for any entry in the
         queryResultCache.
    -->
    <queryResultMaxDocsCached>200</queryResultMaxDocsCached>

    <!-- Query Related Event Listeners

         newSearcher - fired when a new searcher is opened while there is
         a current searcher handling requests. It is typically used to
         prewarm certain caches to prevent long request times for some
         types of queries.

         firstSearcher - fired when a new searcher is opened and there is
         no currently registered searcher handling requests or from
         which to obtain warming data.

         (My understanding: firstSearcher fires for the very first
         searcher after the application starts, before any query has
         been served; newSearcher fires for every searcher opened after
         that.)
    -->
<!-- QuerySenderListener takes an array of NamedList and executes a
local query request for each NamedList in sequence.
-->
<listener event="newSearcher" class="solr.QuerySenderListener">
<arr name="queries">
<!--
<lst><str name="q">solr</str><str name="sort">price asc</str></lst>
<lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
-->
</arr>
</listener>
<listener event="firstSearcher" class="solr.QuerySenderListener">
<arr name="queries">
<lst>
<str name="q">static firstSearcher warming in solrconfig.xml</str>
</lst>
</arr>
    </listener>

    <!-- Use Cold Searcher

         If a search request comes in and there is no current registered
         searcher, immediately register the still-warming searcher and
         use it. If set to false, all requests block until the first
         searcher has finished warming.
    -->
    <useColdSearcher>false</useColdSearcher>

    <!-- Max Warming Searchers

         Maximum number of searchers that may be warming in the
         background concurrently. An error is returned if this limit is
         exceeded. Values of 1-2 are recommended for read-only slaves;
         masters can use higher values.
    -->
    <maxWarmingSearchers>2</maxWarmingSearchers>

  </query>

  <!-- Request Dispatcher

       This section describes how the SolrDispatchFilter handles requests.

       handleSelect is a legacy option that affects the behavior of
       certain requests (such as "/select?qt=XXX").

       handleSelect="true" causes the SolrDispatchFilter to process the
       request and dispatch to the handler specified by the "qt"
       parameter, assuming "/select" is not registered.

       handleSelect="false" causes the SolrDispatchFilter to ignore
       "/select" requests and return a 404, unless a handler is
       explicitly registered with the name "/select".

       handleSelect="true" is not recommended for new users, but is
       provided for backwards compatibility.
  -->
<requestDispatcher handleSelect="false" >
    <!-- Request Parsing

         Configures how requests are parsed and which restrictions apply
         to the ContentStreams in those requests.

         enableRemoteStreaming - enables the stream.file and stream.url
                                 parameters for identifying remote streams.
         multipartUploadLimitInKB - maximum size (in KiB) allowed for
                                 multi-file uploads.
         formdataUploadLimitInKB - maximum size (in KiB) of form data
                                 (application/x-www-form-urlencoded)
                                 sent via POST.
         addHttpRequestToContext - if true, the original HttpServletRequest
                                 is included in the context of the
                                 SolrQueryRequest under the key
                                 "httpRequest". None of Solr's own
                                 components use it, but custom plugins
                                 might.

         *** WARNING ***
         The configuration below allows Solr to fetch remote files; you
         must make sure your system is authorized to access those files.
    -->
<requestParsers enableRemoteStreaming="true"
multipartUploadLimitInKB="2048000"
formdataUploadLimitInKB="2048"
addHttpRequestToContext="false"/> <!-- HTTP Caching 设置HTTP缓存相关的参数 (for proxy caches and clients). 下面的设置表示Solr 不会输出任何与HTTP缓存相关的header信息
-->
<httpCaching never304="true" />
<!-- 如果包含 <cacheControl> 元素, 此元素用于生成 Cache-Control header (比如控制过期的header("max-age=XXX" )
默认情况下不会生成Cache-Control header -->
<!--
<httpCaching never304="true" >
<cacheControl>max-age=30, public</cacheControl>
</httpCaching>
-->
    <!-- To have Solr automatically generate HTTP Caching headers and
         respond correctly to Cache Validation requests, set
         never304="false". Solr will then generate Last-Modified and
         ETag headers based on properties of the Index.

         The following options can also be specified to affect the
values of these headers... lastModFrom - the default value is "openTime" which means the
Last-Modified value (and validation against If-Modified-Since
requests) will all be relative to when the current Searcher
was opened. You can change it to lastModFrom="dirLastMod" if
you want the value to exactly correspond to when the physical
index was last modified. etagSeed="..." is an option you can change to force the ETag
header (and validation against If-None-Match requests) to be
different even if the index has not changed (ie: when making
significant changes to your config file) (lastModifiedFrom and etagSeed are both ignored if you use
the never304="true" option)
-->
<!--
<httpCaching lastModifiedFrom="openTime"
etagSeed="Solr">
<cacheControl>max-age=30, public</cacheControl>
</httpCaching>
-->
  </requestDispatcher>

  <!-- Request Handlers

       http://wiki.apache.org/solr/SolrRequestHandler

       Query requests are dispatched to the handler specified in the
       request.

       Legacy behavior: If the request path uses "/select" but no Request
Handler has that name, and if handleSelect="true" has been specified in
the requestDispatcher, then the Request Handler is dispatched based on
the qt parameter. Handlers without a leading '/' are accessed this way
like so: http://host/app/[core/]select?qt=name If no qt is
given, then the requestHandler that declares default="true" will be
used or the one named "standard". If a Request Handler is declared with startup="lazy", then it will
not be initialized until the first request that uses it. -->
  <!-- SearchHandler

       http://wiki.apache.org/solr/SearchHandler

       For processing Search Queries, the primary Request Handler
       provided with Solr is "SearchHandler". It delegates to a sequence
of SearchComponents (see below) and supports distributed
queries across multiple shards
-->
<requestHandler name="/select" class="solr.SearchHandler">
    <!-- default query parameters; they can be overridden by parameters
         in the request -->
<lst name="defaults">
<str name="echoParams">explicit</str>
<int name="rows">10</int>
      <!-- Controls whether the request is served by the local replica
           or dispatched to replicas on other shards.
Consider making 'preferLocalShards' true when:
1) maxShardsPerNode > 1
2) Number of shards > 1
3) CloudSolrClient or LbHttpSolrServer is used by clients.
Without this option, every core broadcasts the distributed query to
a replica of each shard where the replicas are chosen randomly.
This option directs the cores to prefer cores hosted locally, thus
preventing network delays between machines.
This behavior also immunizes a bad/slow machine from slowing down all
      the good machines (if those good machines were querying this bad one).

      When clients use plain HttpSolrServer, it is recommended to leave
      this set to false.
-->
<bool name="preferLocalShards">false</bool>
</lst>
<!-- "appends"参数用于指定每次请求都需另外增加的查询条件
注意:客户端无法阻止这个附加的查询条件。所以除非是确实需要每个查询都需要附加此条件,否则不要设置这个参数-->
<!--
<lst name="appends">
<str name="fq">inStock:true</str>
</lst>
-->
<!-- "invariants" 允许Solr维护人员锁定一些选项。
这里设置的参数将覆盖"defaults", "appends" 以及请求中相同的参数. In this example, the facet.field and facet.query params would
be fixed, limiting the facets clients can use. Faceting is
not turned on by default - but if the client does specify
facet=true in the request, these are the only facets they
will be able to see counts for; regardless of what other
facet.field or facet.query params they may specify. -->
<!--
<lst name="invariants">
<str name="facet.field">cat</str>
<str name="facet.field">manu_exact</str>
<str name="facet.query">price:[* TO 500]</str>
<str name="facet.query">price:[500 TO *]</str>
</lst>
-->
<!-- If the default list of SearchComponents is not desired, that
list can either be overridden completely, or components can be
prepended or appended to the default list. (see below)
-->
<!--
<arr name="components">
<str>nameOfCustomComponent1</str>
<str>nameOfCustomComponent2</str>
</arr>
-->
  </requestHandler>

  <!-- A request handler that returns indented (formatted) JSON by default -->
<requestHandler name="/query" class="solr.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="wt">json</str>
<str name="indent">true</str>
<str name="df">text</str>
</lst>
  </requestHandler>

  <!-- A Robust Example

       This example SearchHandler declaration shows off usage of the
       SearchHandler with many defaults declared.

       Note that multiple instances of the same Request Handler
(SearchHandler) can be registered multiple times with different
names (and different init parameters)
-->
<requestHandler name="/browse" class="solr.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str> <!-- VelocityResponseWriter settings -->
<str name="wt">velocity</str>
<str name="v.template">browse</str>
<str name="v.layout">layout</str>
<str name="title">Solritas</str> <!-- Query settings -->
<str name="defType">edismax</str>
<str name="qf">
text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
</str>
<str name="mm">100%</str>
<str name="q.alt">*:*</str>
<str name="rows">10</str>
<str name="fl">*,score</str> <str name="mlt.qf">
text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
</str>
<str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
<int name="mlt.count">3</int> <!-- Faceting defaults -->
<str name="facet">on</str>
<str name="facet.missing">true</str>
<str name="facet.field">cat</str>
<str name="facet.field">manu_exact</str>
<str name="facet.field">content_type</str>
<str name="facet.field">author_s</str>
<str name="facet.query">ipod</str>
<str name="facet.query">GB</str>
<str name="facet.mincount">1</str>
<str name="facet.pivot">cat,inStock</str>
<str name="facet.range.other">after</str>
<str name="facet.range">price</str>
<int name="f.price.facet.range.start">0</int>
<int name="f.price.facet.range.end">600</int>
<int name="f.price.facet.range.gap">50</int>
<str name="facet.range">popularity</str>
<int name="f.popularity.facet.range.start">0</int>
<int name="f.popularity.facet.range.end">10</int>
<int name="f.popularity.facet.range.gap">3</int>
<str name="facet.range">manufacturedate_dt</str>
<str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
<str name="f.manufacturedate_dt.facet.range.end">NOW</str>
<str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
<str name="f.manufacturedate_dt.facet.range.other">before</str>
<str name="f.manufacturedate_dt.facet.range.other">after</str> <!-- Highlighting defaults -->
<str name="hl">on</str>
<str name="hl.fl">content features title name</str>
<str name="hl.preserveMulti">true</str>
<str name="hl.encoder">html</str>
<str name="hl.simple.pre"><b></str>
<str name="hl.simple.post"></b></str>
<str name="f.title.hl.fragsize">0</str>
<str name="f.title.hl.alternateField">title</str>
<str name="f.name.hl.fragsize">0</str>
<str name="f.name.hl.alternateField">name</str>
<str name="f.content.hl.snippets">3</str>
<str name="f.content.hl.fragsize">200</str>
<str name="f.content.hl.alternateField">content</str>
<str name="f.content.hl.maxAlternateFieldLength">750</str> <!-- Spell checking defaults -->
<str name="spellcheck">on</str>
<str name="spellcheck.extendedResults">false</str>
<str name="spellcheck.count">5</str>
<str name="spellcheck.alternativeTermCount">2</str>
<str name="spellcheck.maxResultsForSuggest">5</str>
<str name="spellcheck.collate">true</str>
<str name="spellcheck.collateExtendedResults">true</str>
<str name="spellcheck.maxCollationTries">5</str>
<str name="spellcheck.maxCollations">3</str>
</lst> <!-- append spellchecking to our list of components -->
<arr name="last-components">
<str>spellcheck</str>
</arr>
  </requestHandler>

  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse">
<lst name="defaults">
<str name="df">text</str>
</lst>
</initParams> <initParams path="/update/json/docs">
<lst name="defaults">
<!--this ensures that the entire json doc will be stored verbatim into one field-->
<str name="srcField">_src_</str>
      <!-- This means the uniqueKeyField will be extracted from the fields,
           and all fields go into the 'df' field. In this config, df is
           already configured to be 'text'.
      -->
<str name="mapUniqueKeyOnly">true</str>
    </lst>
  </initParams>

  <!-- The following are implicitly added
<requestHandler name="/update/json" class="solr.UpdateRequestHandler">
<lst name="defaults">
<str name="stream.contentType">application/json</str>
</lst>
</requestHandler>
<requestHandler name="/update/csv" class="solr.UpdateRequestHandler">
<lst name="defaults">
<str name="stream.contentType">application/csv</str>
</lst>
</requestHandler>
  -->

  <!-- Solr Cell Update Request Handler

       http://wiki.apache.org/solr/ExtractingRequestHandler
  -->
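  <!-- A hypothetical usage sketch (host, port, file name, and id are
       illustrative): a rich document can be indexed through this handler
       with something like

         curl "http://localhost:8983/solr/techproducts/update/extract?literal.id=doc1&commit=true" -F "myfile=@example.pdf"

       This relies on the extraction contrib jars declared by the <lib/>
       directives near the top of this file.
  -->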
<requestHandler name="/update/extract"
startup="lazy"
class="solr.extraction.ExtractingRequestHandler" >
<lst name="defaults">
<str name="lowernames">true</str>
<str name="uprefix">ignored_</str> <!-- capture link hrefs but ignore div attributes -->
<str name="captureAttr">true</str>
<str name="fmap.a">links</str>
<str name="fmap.div">ignored_</str>
</lst>
  </requestHandler>

  <!-- Field Analysis Request Handler

       Mainly used to test whether analysis (tokenization) results match
       what we expect.

       Request parameters are:
       analysis.fieldname - field name whose analyzers are to be used
       analysis.fieldtype - field type whose analyzers are to be used
analysis.fieldvalue - text for index-time analysis
q (or analysis.q) - text for query time analysis
analysis.showmatch (true|false) - When set to true and when
query analysis is performed, the produced tokens of the
field value analysis will be marked as "matched" for every
       token that is produced by the query analysis
-->
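  <!-- A hypothetical usage sketch (field name and text are illustrative):

         http://localhost:8983/solr/techproducts/analysis/field?analysis.fieldname=text&analysis.fieldvalue=Running+Quickly&wt=json

       shows how the given value would be tokenized at index time for the
       "text" field.
  -->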
<requestHandler name="/analysis/field"
startup="lazy"
class="solr.FieldAnalysisRequestHandler" /> <!-- Document Analysis Handler http://wiki.apache.org/solr/AnalysisRequestHandler An analysis handler that provides a breakdown of the analysis
process of provided documents. This handler expects a (single)
content stream with the following format: <docs>
<doc>
<field name="id">1</field>
<field name="name">The Name</field>
<field name="text">The Text Value</field>
</doc>
<doc>...</doc>
<doc>...</doc>
...
</docs> Note: Each document must contain a field which serves as the
unique key. This key is used in the returned response to associate
an analysis breakdown to the analyzed document. Like the FieldAnalysisRequestHandler, this handler also supports
query analysis by sending either an "analysis.query" or "q"
request parameter that holds the query text to be analyzed. It
also supports the "analysis.showmatch" parameter which when set to
true, all field tokens that match the query tokens will be marked
as a "match".
-->
<requestHandler name="/analysis/document"
class="solr.DocumentAnalysisRequestHandler"
startup="lazy" /> <!-- Echo the request contents back to the client -->
<requestHandler name="/debug/dump" class="solr.DumpRequestHandler" >
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="echoHandler">true</str>
</lst>
  </requestHandler>

  <!-- Search Components

       Search components are registered to SolrCore and used by
       SearchHandler.

       By default, the following components are available:

       <searchComponent name="query"     class="solr.QueryComponent" />
       <searchComponent name="facet"     class="solr.FacetComponent" />
       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
       <searchComponent name="highlight" class="solr.HighlightComponent" />
       <searchComponent name="stats"     class="solr.StatsComponent" />
       <searchComponent name="debug"     class="solr.DebugComponent" />

       Default configuration in a requestHandler would look like:

       <arr name="components">
         <str>query</str>
         <str>facet</str>
         <str>mlt</str>
         <str>highlight</str>
         <str>stats</str>
         <str>debug</str>
       </arr>

       If you register a searchComponent with one of the standard names,
       the default configuration for that component is overridden.

       The following examples show how to add a component before or
       after the 'standard' components:

       <arr name="first-components">
         <str>myFirstComponentName</str>
       </arr>

       <arr name="last-components">
         <str>myLastComponentName</str>
       </arr>

       NOTE: The component registered with the name "debug" will
       always be executed after the "last-components"
   -->

  <!-- Spell Check

       The spell check component returns a list of likely correct
       spellings when a misspelled word is entered.

       http://wiki.apache.org/solr/SpellCheckComponent
  -->
  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">

    <str name="queryAnalyzerFieldType">text_general</str>

    <!-- Multiple "Spell Checkers" can be declared within this component
    -->

    <!-- a spellchecker built from a field of the main index -->
<lst name="spellchecker">
<str name="name">default</str>
<str name="field">text</str>
<str name="classname">solr.DirectSolrSpellChecker</str>
<!-- the spellcheck distance measure used, the default is the internal levenshtein -->
<str name="distanceMeasure">internal</str>
<!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
<float name="accuracy">0.5</float>
<!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
<int name="maxEdits">2</int>
<!-- the minimum shared prefix when enumerating terms -->
<int name="minPrefix">1</int>
<!-- maximum number of inspections per result. -->
<int name="maxInspections">5</int>
<!-- minimum length of a query term to be considered for correction -->
<int name="minQueryLength">4</int>
<!-- maximum threshold of documents a query term can appear to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
-->
</lst> <!-- a spellchecker that can break or combine words. See "/spell" handler below for usage -->
<lst name="spellchecker">
<str name="name">wordbreak</str>
<str name="classname">solr.WordBreakSolrSpellChecker</str>
<str name="field">name</str>
<str name="combineWords">true</str>
<str name="breakWords">true</str>
<int name="maxChanges">10</int>
</lst> <!-- a spellchecker that uses a different distance measure -->
<!--
<lst name="spellchecker">
<str name="name">jarowinkler</str>
<str name="field">spell</str>
<str name="classname">solr.DirectSolrSpellChecker</str>
<str name="distanceMeasure">
org.apache.lucene.search.spell.JaroWinklerDistance
</str>
</lst>
    -->

    <!-- a spellchecker that uses an alternate comparator

         comparatorClass can be one of:
1. score (default)
2. freq (Frequency first, then score)
3. A fully qualified class name
-->
<!--
<lst name="spellchecker">
<str name="name">freq</str>
<str name="field">lowerfilt</str>
<str name="classname">solr.DirectSolrSpellChecker</str>
<str name="comparatorClass">freq</str>
--> <!-- A spellchecker that reads the list of words from a file -->
<!--
<lst name="spellchecker">
<str name="classname">solr.FileBasedSpellChecker</str>
<str name="name">file</str>
<str name="sourceLocation">spellings.txt</str>
<str name="characterEncoding">UTF-8</str>
<str name="spellcheckIndexDir">spellcheckerFile</str>
</lst>
-->
  </searchComponent>

  <!-- An example of how to use the spellcheck component.

       NOTE: This is purely an example. The SpellCheckComponent is hooked
       directly into this request handler so that a separate spellcheck
       request is not needed.

       See http://wiki.apache.org/solr/SpellCheckComponent for details
on the request parameters.
-->
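  <!-- A hypothetical usage sketch (host, port, and the misspelled terms
       are illustrative): once this handler is registered, a request like

         http://localhost:8983/solr/techproducts/spell?q=delll+ultra+sharp&wt=json

       returns suggestions and collations built from the 'default' and
       'wordbreak' dictionaries configured in the defaults below.
  -->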
<requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<!-- Solr will use suggestions from both the 'default' spellchecker
and from the 'wordbreak' spellchecker and combine them.
collations (re-written queries) can include a combination of
corrections from both spellcheckers -->
<str name="spellcheck.dictionary">default</str>
<str name="spellcheck.dictionary">wordbreak</str>
<str name="spellcheck">on</str>
<str name="spellcheck.extendedResults">true</str>
<str name="spellcheck.count">10</str>
<str name="spellcheck.alternativeTermCount">5</str>
<str name="spellcheck.maxResultsForSuggest">5</str>
<str name="spellcheck.collate">true</str>
<str name="spellcheck.collateExtendedResults">true</str>
<str name="spellcheck.maxCollationTries">10</str>
<str name="spellcheck.maxCollations">5</str>
</lst>
<arr name="last-components">
<str>spellcheck</str>
</arr>
</requestHandler> <!-- The SuggestComponent in Solr provides users with automatic suggestions for query terms.
You can use this to implement a powerful auto-suggest feature in your search application.
As with the rest of this solrconfig.xml file, the configuration of this component is purely
an example that applies specifically to this configset and example documents. More information about this component and other configuration options are described in the
"Suggester" section of the reference guide available at
http://archive.apache.org/dist/lucene/solr/ref-guide
-->
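  <!-- A hypothetical usage sketch (host, port, and prefix are
       illustrative): because buildOnStartup is false below, the suggester
       must be built before its first use, e.g.

         http://localhost:8983/solr/techproducts/suggest?suggest=true&suggest.dictionary=mySuggester&suggest.build=true&suggest.q=elec

       Subsequent requests can omit suggest.build.
  -->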
<searchComponent name="suggest" class="solr.SuggestComponent">
<lst name="suggester">
<str name="name">mySuggester</str>
<str name="lookupImpl">FuzzyLookupFactory</str>
<str name="dictionaryImpl">DocumentDictionaryFactory</str>
<str name="field">cat</str>
<str name="weightField">price</str>
<str name="suggestAnalyzerFieldType">string</str>
<str name="buildOnStartup">false</str>
</lst>
  </searchComponent>

  <requestHandler name="/suggest" class="solr.SearchHandler"
startup="lazy" >
<lst name="defaults">
<str name="suggest">true</str>
<str name="suggest.count">10</str>
</lst>
<arr name="components">
<str>suggest</str>
</arr>
</requestHandler> <!-- Term Vector Component
http://wiki.apache.org/solr/TermVectorComponent
-->
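  <!-- A hypothetical usage sketch (host, port, and query are
       illustrative): with the component and the /tvrh handler declared
       below, a request like

         http://localhost:8983/solr/techproducts/tvrh?q=cable&tv.tf=true&tv.df=true

       returns term vector information (term and document frequencies)
       for the matching documents.
  -->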
<searchComponent name="tvComponent" class="solr.TermVectorComponent"/> <!-- A request handler for demonstrating the term vector component This is purely as an example. In reality you will likely want to add the component to your
already specified request handlers.
-->
<requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<bool name="tv">true</bool>
</lst>
<arr name="last-components">
<str>tvComponent</str>
</arr>
</requestHandler> <!-- Clustering Component You'll need to set the solr.clustering.enabled system property
when running solr to run with clustering enabled:
-Dsolr.clustering.enabled=true https://cwiki.apache.org/confluence/display/solr/Result+Clustering
-->
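  <!-- A hypothetical usage sketch (host, port, and parameters are
       illustrative): after starting Solr with
       -Dsolr.clustering.enabled=true, the demo handler declared further
       below can be queried with

         http://localhost:8983/solr/techproducts/clustering?q=*:*&rows=100

       and clusters are returned alongside the normal search results.
  -->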
<searchComponent name="clustering"
enable="${solr.clustering.enabled:false}"
class="solr.clustering.ClusteringComponent" >
<!--
Declaration of "engines" (clustering algorithms). The open source algorithms from Carrot2.org project:
* org.carrot2.clustering.lingo.LingoClusteringAlgorithm
* org.carrot2.clustering.stc.STCClusteringAlgorithm
* org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
See http://project.carrot2.org/algorithms.html for more information. Commercial algorithm Lingo3G (needs to be installed separately):
* com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
--> <lst name="engine">
<str name="name">lingo3g</str>
<bool name="optional">true</bool>
<str name="carrot.algorithm">com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm</str>
<str name="carrot.resourcesDir">clustering/carrot2</str>
</lst> <lst name="engine">
<str name="name">lingo</str>
<str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
<str name="carrot.resourcesDir">clustering/carrot2</str>
</lst> <lst name="engine">
<str name="name">stc</str>
<str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
<str name="carrot.resourcesDir">clustering/carrot2</str>
</lst> <lst name="engine">
<str name="name">kmeans</str>
<str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
<str name="carrot.resourcesDir">clustering/carrot2</str>
</lst>
</searchComponent> <!-- A request handler for demonstrating the clustering component.
This is meant as an example.
In reality you will likely want to add the component to your
already specified request handlers.
-->
<requestHandler name="/clustering"
startup="lazy"
enable="${solr.clustering.enabled:false}"
class="solr.SearchHandler">
<lst name="defaults">
<bool name="clustering">true</bool>
<bool name="clustering.results">true</bool>
<!-- Field name with the logical "title" of a each document (optional) -->
<str name="carrot.title">name</str>
<!-- Field name with the logical "URL" of a each document (optional) -->
<str name="carrot.url">id</str>
<!-- Field name with the logical "content" of a each document (optional) -->
<str name="carrot.snippet">features</str>
<!-- Apply highlighter to the title/ content and use this for clustering. -->
<bool name="carrot.produceSummary">true</bool>
<!-- the maximum number of labels per cluster -->
<!--<int name="carrot.numDescriptions">5</int>-->
<!-- produce sub clusters -->
<bool name="carrot.outputSubClusters">false</bool> <!-- Configure the remaining request handler parameters. -->
<str name="defType">edismax</str>
<str name="qf">
text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
</str>
<str name="q.alt">*:*</str>
<str name="rows">100</str>
<str name="fl">*,score</str>
</lst>
<arr name="last-components">
<str>clustering</str>
</arr>
</requestHandler> <!-- Terms Component http://wiki.apache.org/solr/TermsComponent A component to return terms and document frequency of those
terms
-->
<searchComponent name="terms" class="solr.TermsComponent"/> <!-- A request handler for demonstrating the terms component -->
<requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<bool name="terms">true</bool>
<bool name="distrib">false</bool>
</lst>
<arr name="components">
<str>terms</str>
</arr>
</requestHandler> <!-- Query Elevation Component http://wiki.apache.org/solr/QueryElevationComponent a search component that enables you to configure the top
results for a given query regardless of the normal lucene
scoring.
-->
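  <!-- A minimal sketch of what the elevate.xml file referenced below
       might contain (the query text and document id are illustrative):

       <elevate>
         <query text="ipod">
           <doc id="MA147LL/A" />
         </query>
       </elevate>
  -->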
<searchComponent name="elevator" class="solr.QueryElevationComponent" >
<!-- pick a fieldType to analyze queries -->
<str name="queryFieldType">string</str>
<str name="config-file">elevate.xml</str>
</searchComponent> <!-- A request handler for demonstrating the elevator component -->
<requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<str name="echoParams">explicit</str>
</lst>
<arr name="last-components">
<str>elevator</str>
</arr>
</requestHandler> <!-- Highlighting Component http://wiki.apache.org/solr/HighlightingParameters
-->
<searchComponent class="solr.HighlightComponent" name="highlight">
<highlighting>
<!-- Configure the standard fragmenter -->
<!-- This could most likely be commented out in the "default" case -->
<fragmenter name="gap"
default="true"
class="solr.highlight.GapFragmenter">
<lst name="defaults">
<int name="hl.fragsize">100</int>
</lst>
</fragmenter> <!-- A regular-expression-based fragmenter
(for sentence extraction)
-->
<fragmenter name="regex"
class="solr.highlight.RegexFragmenter">
<lst name="defaults">
<!-- slightly smaller fragsizes work better because of slop -->
<int name="hl.fragsize">70</int>
<!-- allow 50% slop on fragment sizes -->
<float name="hl.regex.slop">0.5</float>
<!-- a basic sentence pattern -->
<str name="hl.regex.pattern">[-\w ,/\n\"']{20,200}</str>
</lst>
</fragmenter> <!-- Configure the standard formatter -->
<formatter name="html"
default="true"
class="solr.highlight.HtmlFormatter">
<lst name="defaults">
<str name="hl.simple.pre"><![CDATA[<em>]]></str>
<str name="hl.simple.post"><![CDATA[</em>]]></str>
</lst>
</formatter> <!-- Configure the standard encoder -->
<encoder name="html"
class="solr.highlight.HtmlEncoder" /> <!-- Configure the standard fragListBuilder -->
<fragListBuilder name="simple"
class="solr.highlight.SimpleFragListBuilder"/> <!-- Configure the single fragListBuilder -->
<fragListBuilder name="single"
class="solr.highlight.SingleFragListBuilder"/> <!-- Configure the weighted fragListBuilder -->
<fragListBuilder name="weighted"
default="true"
class="solr.highlight.WeightedFragListBuilder"/> <!-- default tag FragmentsBuilder -->
<fragmentsBuilder name="default"
default="true"
class="solr.highlight.ScoreOrderFragmentsBuilder">
<!--
<lst name="defaults">
<str name="hl.multiValuedSeparatorChar">/</str>
</lst>
-->
</fragmentsBuilder> <!-- multi-colored tag FragmentsBuilder -->
<fragmentsBuilder name="colored"
class="solr.highlight.ScoreOrderFragmentsBuilder">
<lst name="defaults">
<str name="hl.tag.pre"><![CDATA[
<b style="background:yellow">,<b style="background:lawgreen">,
<b style="background:aquamarine">,<b style="background:magenta">,
<b style="background:palegreen">,<b style="background:coral">,
<b style="background:wheat">,<b style="background:khaki">,
<b style="background:lime">,<b style="background:deepskyblue">]]></str>
<str name="hl.tag.post"><![CDATA[</b>]]></str>
</lst>
</fragmentsBuilder> <boundaryScanner name="default"
default="true"
class="solr.highlight.SimpleBoundaryScanner">
<lst name="defaults">
<str name="hl.bs.maxScan">10</str>
<str name="hl.bs.chars">.,!? </str>
</lst>
</boundaryScanner> <boundaryScanner name="breakIterator"
class="solr.highlight.BreakIteratorBoundaryScanner">
<lst name="defaults">
<!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
<str name="hl.bs.type">WORD</str>
<!-- language and country are used when constructing Locale object. -->
<!-- And the Locale object will be used when getting instance of BreakIterator -->
<str name="hl.bs.language">en</str>
<str name="hl.bs.country">US</str>
</lst>
</boundaryScanner>
</highlighting>
</searchComponent> <!-- Update Processors Chains of Update Processor Factories for dealing with Update
Requests can be declared, and then used by name in Update
Request Processors http://wiki.apache.org/solr/UpdateRequestProcessor -->
<!-- Deduplication An example dedup update processor that creates the "id" field
on the fly based on the hash code of some other fields. This
example has overwriteDupes set to false since we are using the
id field as the signatureField and Solr will maintain
uniqueness based on that anyway. -->
<!--
<updateRequestProcessorChain name="dedupe">
<processor class="solr.processor.SignatureUpdateProcessorFactory">
<bool name="enabled">true</bool>
<str name="signatureField">id</str>
<bool name="overwriteDupes">false</bool>
<str name="fields">name,features,cat</str>
<str name="signatureClass">solr.processor.Lookup3Signature</str>
</processor>
<processor class="solr.LogUpdateProcessorFactory" />
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
--> <!-- Language identification This example update chain identifies the language of the incoming
documents using the langid contrib. The detected language is
written to field language_s. No field name mapping is done.
The fields used for detection are text, title, subject and description,
      making this example suitable for detecting languages from full-text
rich documents injected via ExtractingRequestHandler.
See more about langId at http://wiki.apache.org/solr/LanguageDetection
-->
<!--
<updateRequestProcessorChain name="langid">
<processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
<str name="langid.fl">text,title,subject,description</str>
<str name="langid.langField">language_s</str>
<str name="langid.fallback">en</str>
</processor>
<processor class="solr.LogUpdateProcessorFactory" />
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
--> <!-- Script update processor This example hooks in an update processor implemented using JavaScript. See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
-->
<!--
<updateRequestProcessorChain name="script">
<processor class="solr.StatelessScriptUpdateProcessorFactory">
<str name="script">update-script.js</str>
<lst name="params">
<str name="config_param">example config parameter</str>
</lst>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
--> <!-- Response Writers http://wiki.apache.org/solr/QueryResponseWriter Request responses will be written using the writer specified by
the 'wt' request parameter matching the name of a registered
writer. The "default" writer is the default and will be used if 'wt' is
not specified in the request.
-->
<!-- The following response writers are implicitly configured unless
overridden...
-->
<!--
<queryResponseWriter name="xml"
default="true"
class="solr.XMLResponseWriter" />
<queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
<queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
<queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
<queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
<queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
<queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
<queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
--> <queryResponseWriter name="json" class="solr.JSONResponseWriter">
<!-- For the purposes of the tutorial, JSON responses are written as
plain text so that they are easy to read in *any* browser.
If you expect a MIME type of "application/json" just remove this override.
-->
<str name="content-type">text/plain; charset=UTF-8</str>
</queryResponseWriter> <!--
Custom response writers can be declared as needed...
-->
<queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
<str name="template.base.dir">${velocity.template.base.dir:}</str>
</queryResponseWriter> <!-- XSLT response writer transforms the XML output by any xslt file found
in Solr's conf/xslt directory. Changes to xslt files are checked for
every xsltCacheLifetimeSeconds.
-->
<queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
<int name="xsltCacheLifetimeSeconds">5</int>
</queryResponseWriter> <!-- Query Parsers https://cwiki.apache.org/confluence/display/solr/Query+Syntax+and+Parsing Multiple QParserPlugins can be registered by name, and then
used in either the "defType" param for the QueryComponent (used
by SearchHandler) or in LocalParams
-->
<!-- example of registering a query parser -->
<!--
<queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
--> <!-- Function Parsers http://wiki.apache.org/solr/FunctionQuery Multiple ValueSourceParsers can be registered by name, and then
used as function names when using the "func" QParser.
-->
<!-- example of registering a custom function parser -->
<!--
<valueSourceParser name="myfunc"
class="com.mycompany.MyValueSourceParser" />
--> <!-- Document Transformers
http://wiki.apache.org/solr/DocTransformers
-->
<!--
Could be something like:
<transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
<int name="connection">jdbc://....</int>
</transformer> To add a constant value to all docs, use:
<transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
<int name="value">5</int>
</transformer> If you want the user to still be able to change it with _value:something_ use this:
<transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
<double name="defaultValue">5</double>
</transformer> If you are using the QueryElevationComponent, you may wish to mark documents that get boosted. The
EditorialMarkerFactory will do exactly that:
<transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
--> </config>