Skip to content

Commit

Permalink
Merge branch 'main' into jetty12_ee8
Browse files Browse the repository at this point in the history
  • Loading branch information
iamsanjay committed Jan 19, 2025
2 parents 98e7f9b + af5fea7 commit a30414d
Show file tree
Hide file tree
Showing 55 changed files with 110 additions and 85 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/bin-solr-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest

env:
DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}

steps:
# Setup
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/docker-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ jobs:
env:
SOLR_DOCKER_IMAGE_REPO: github-pr/solr
SOLR_DOCKER_IMAGE_TAG: ${{github.event.number}}
DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}

steps:
# Setup
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/gradle-precommit.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest

env:
DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}

steps:
# Setup
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/solrj-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-latest

env:
DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}

steps:
# Setup
Expand Down
2 changes: 1 addition & 1 deletion dev-docs/FAQ.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ If you don't yet have an account, you have to ask for one in the 'users' or 'dev
=== Where can I find information about test history?

* http://fucit.org/solr-jenkins-reports/failure-report.html
* https://ge.apache.org/scans/tests?search.relativeStartTime=P90D&search.rootProjectNames=solr*
* https://develocity.apache.org/scans/tests?search.relativeStartTime=P90D&search.rootProjectNames=solr*
* https://lists.apache.org[Solr mailing list archives especially builds]

=== How can I build the JavaDoc's and the Reference Guide?
Expand Down
2 changes: 1 addition & 1 deletion gradle/develocity.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ def isCIBuild = System.getenv().keySet().any { it ==~ /(?i)((JENKINS|HUDSON)(_\w
// https://docs.gradle.com/enterprise/gradle-plugin/

develocity {
server = "https://ge.apache.org"
server = "https://develocity.apache.org"
projectId = "solr"

buildScan {
Expand Down
2 changes: 1 addition & 1 deletion gradle/template.gradle.properties
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ systemProp.file.encoding=UTF-8
# Set up gradle JVM defaults.
#
# We also open up internal compiler modules for spotless/ google java format.
org.gradle.jvmargs=-Xmx1g -XX:TieredStopAtLevel=1 -XX:+UseParallelGC -XX:ActiveProcessorCount=1 \
org.gradle.jvmargs=-Xmx1g -XX:ReservedCodeCacheSize=256m -XX:TieredStopAtLevel=1 -XX:+UseParallelGC -XX:ActiveProcessorCount=1 \
--add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED \
--add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED \
--add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED \
Expand Down
2 changes: 1 addition & 1 deletion gradle/testing/failed-tests-at-end.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def failedTests = new LinkedHashSet() // for dedupe due to weird afterTest class
def genFailInfo(def task, TestDescriptor desc) {
boolean isSuite = (desc.name == 'classMethod')
def name = isSuite ? desc.className : "${desc.className}.${desc.name}"
def historyUrl = "https://ge.apache.org/scans/tests?search.rootProjectNames=solr-root&tests.container=$desc.className"
def historyUrl = "https://develocity.apache.org/scans/tests?search.rootProjectNames=solr-root&tests.container=$desc.className"
if (!isSuite) { // is test method specific
historyUrl += "&tests.test=$desc.name"
historyUrl += " http://fucit.org/solr-jenkins-reports/history-trend-of-recent-failures.html#series/$name"
Expand Down
2 changes: 1 addition & 1 deletion settings.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ pluginManagement {
}

plugins {
id 'com.gradle.develocity' version '3.18.1'
id 'com.gradle.develocity' version '3.18.2'
id 'com.gradle.common-custom-user-data-gradle-plugin' version '2.0.2'
}

Expand Down
3 changes: 3 additions & 0 deletions solr/CHANGES.txt
Original file line number Diff line number Diff line change
Expand Up @@ -191,6 +191,9 @@ Other Changes

* SOLR-17589: Prevent error log entry on solr server due to initial HEAD request from HttpJdkSolrClient. (Paul Blanchaert via James Dyer)

* SOLR-17611: SolrJ's User-Agent header now uses the version of SolrJ. There's a corresponding
HttpSolrCall.getUserAgentSolrVersion to parse it. (David Smiley)

================== 9.8.0 ==================
New Features
---------------------
Expand Down
19 changes: 19 additions & 0 deletions solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@
import org.apache.http.entity.InputStreamEntity;
import org.apache.solr.api.ApiBag;
import org.apache.solr.api.V2HttpCall;
import org.apache.solr.client.api.util.SolrVersion;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.common.SolrException;
Expand Down Expand Up @@ -1170,6 +1171,24 @@ protected Object _getHandler() {
return handler;
}

/**
* Gets the client (user-agent) SolrJ version, or null if isn't SolrJ. Note that older SolrJ
* clients prior to 9.9 present themselves as 1.0 or 2.0.
*/
public SolrVersion getUserAgentSolrVersion() {
String header = req.getHeader("User-Agent");
if (header == null || !header.startsWith("Solr")) {
return null;
}
try {
return SolrVersion.valueOf(header.substring(header.lastIndexOf(' ') + 1));
} catch (Exception e) {
// unexpected but let's not freak out
assert false : e.toString();
return null;
}
}

private AuthorizationContext getAuthCtx() {

String resource = getPath();
Expand Down
2 changes: 1 addition & 1 deletion solr/solr-ref-guide/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,7 @@ task startUIBuildDir(type: Copy) {
group = 'Site - UI'
dependsOn tasks.downloadDefaultUITemplate

// Copy the antora latest default ui into the root folder, to mimick a new checkout of the default ui
// Copy the antora latest default ui into the root folder, to mimic a new checkout of the default ui
with {
from "${project.ext.nodeProjectDir}/node_modules/@antora/ui-default"
duplicatesStrategy = 'EXCLUDE'
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

@startuml

' Note: in TEOZ mode som skinparams don't work
' Note: in TEOZ mode some skinparams don't work
' and currently notes on messages can't be customized -
' but this mode creates more compact layout, enable if needed

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -828,7 +828,7 @@ http://localhost:8983/solr/admin/cores?action=REQUESTSTATUS&requestid=1
[[coreadmin-requestrecovery]]
== REQUESTRECOVERY

The `REQUESTRECOVERY` action manually asks a core to recover by synching with the leader.
The `REQUESTRECOVERY` action manually asks a core to recover by syncing with the leader.
This should be considered an "expert" level command and should be used in situations where the node (SolrCloud replica) is unable to become active automatically.

`admin/cores?action=REQUESTRECOVERY&core=_core-name_`
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,7 @@ These settings affect how or when updates are made to an index.
=== deletionPolicy

Controls how commits are retained in case of rollback.
The default is `SolrDeletionPolicy`, which take ths following parameters:
The default is `SolrDeletionPolicy`, which takes the following parameters:

`maxCommitsToKeep`::
+
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ The content of this element will be sent as the value of the Cache-Control heade
This header is used to modify the default caching behavior of the requesting client.
The possible values for the Cache-Control header are defined by the HTTP 1.1 specification in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9[Section 14.9].

Setting the max-age field controls how long a client may re-use a cached response before requesting it again from the server.
Setting the max-age field controls how long a client may reuse a cached response before requesting it again from the server.
This time interval should be set according to how often you update your index and whether or not it is acceptable for your application to use content that is somewhat out of date.
Setting `must-revalidate` will tell the client to validate with the server that its cached copy is still good before re-using it.
This will ensure that the most timely result is used, while avoiding a second fetch of the content if it isn't needed, at the cost of a request to the server to do the check.
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ The name can be anything other than `schema.xml`, as that name is reserved for t
=== ClassicIndexSchemaFactory

An alternative to using a managed schema is to explicitly configure a `ClassicIndexSchemaFactory`.
This requires the use of a `schema.xml` file, and disallows any programatic changes to the Schema at run time.
This requires the use of a `schema.xml` file, and disallows any programmatic changes to the Schema at run time.
The `schema.xml` file must be edited manually and is loaded only when the collection is loaded.

[source,xml]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -339,7 +339,7 @@ Alternatively, the processor offers a "permissive" mode (`permissiveMode=true`)

{solr-javadocs}/core/org/apache/solr/update/processor/NumFieldLimitingUpdateRequestProcessorFactory.html[NumFieldLimitingUpdateRequestProcessorFactory]:: Fails update requests once a core has exceeded a configurable "maximum" number of fields.
+
Solr performance can degrade and even become unstable if cores accumulate too many (e.g. more than 500) fields. The "NumFieldLimiting" URP is offered as a safeguard that helps users notice potentially-dangerous schema design and/or mis-use of dynamic fields, before these performance and stability problems would manifest.
Solr performance can degrade and even become unstable if cores accumulate too many (e.g. more than 500) fields. The "NumFieldLimiting" URP is offered as a safeguard that helps users notice potentially-dangerous schema design and/or misuse of dynamic fields, before these performance and stability problems would manifest.
Note that the field count an index reports can be influenced by deleted (but not yet purged) documents, and may vary from replica to replica.
In order to avoid these sort of discrepancies between replicas, use of this URP should almost always precede DistributedUpdateProcessor in when running in SolrCloud mode.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,7 @@ To support rolling restarts from older versions, Solr can be configured to accep

If the `SolrAuthV2` header is present but fails validation, then Solr will not fall back to checking `SolrAuth`. The legacy authentication headers will only be consulted when the newest headers are not present.

Unkown values for `solr.pki.acceptVersion` will emit a warning log message but will not cause errors to more smoothly support future protocol revisions.
Unknown values for `solr.pki.acceptVersion` will emit a warning log message but will not cause errors to more smoothly support future protocol revisions.

The timeout is configurable through a system property called `pkiauth.ttl`.
For example, if you wish to increase the time-to-live to 10 seconds (10,000 milliseconds), start each node with a property `'-Dpkiauth.ttl=10000'`.
Original file line number Diff line number Diff line change
Expand Up @@ -726,13 +726,13 @@ When using the Backup & Restore Collections API Calls, the **`location`** option
All the above options will resolve to the same directory in your S3 Bucket: `dir/in/bucket`.
Pre-Creation::
The location must already exist within your S3 bucket before you can preform any backup operations with that location.
The location must already exist within your S3 bucket before you can perform any backup operations with that location.
+
**Please note that the directory you create in S3 cannot begin with a `/`, as `locations` are stripped of any `/` prefix (shown in `Location Format`).**
Empty Location::
If you do not want to use a sub-directory within your bucket to store your backup, you can use any of the following location options: `/`, `s3:/`, `s3://`.
However the location option is mandatory and you will recieve an error when trying to perform backup operations without it.
However the location option is mandatory and you will receive an error when trying to perform backup operations without it.
====

An example configuration to enable S3 backups and restore can be seen below:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ There are several options defined in this example:
<2> The parameter `"blockUnknown":true` means that unauthenticated requests are not allowed to pass through.
<3> A user called 'solr', with a password `'SolrRocks'`, in the encoded format detailed above, has been defined.
<4> We override the `realm` property to display another text on the login prompt.
<5> The parameter `"forwardCredentials":false` means we let Solr's PKI authenticaion handle distributed request instead of forwarding the Basic Auth header.
<5> The parameter `"forwardCredentials":false` means we let Solr's PKI authentication handle distributed request instead of forwarding the Basic Auth header.

Save your settings to a file called `security.json` locally.
If you are using Solr in single-node installation, you should put this file in `$SOLR_HOME`.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2256,7 +2256,7 @@ In this example:
This is considered successful.
* In the "successes" section, core_node23 was _not_ the leader for shard3, so leadership was assigned to that replica.

The "Summary" section with the "Success" tag indicates that the command rebalanced all _active_ replicas with the preferredLeader property set as requried.
The "Summary" section with the "Success" tag indicates that the command rebalanced all _active_ replicas with the preferredLeader property set as required.
If a replica cannot be made leader due to not being healthy (for example, it is on a Solr instance that is not running), it's also considered success.

[source,json]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ tar zxf solr-{solr-full-version}.tgz

Once extracted, you are now ready to run Solr using the instructions provided in the <<Starting Solr>> section below.

TIP: Windows includes the `tar` tool since Windows 10. Open a command line window and execute the above comamnd. There are also several 3rd party un-archiving tools that support `.tar` archives.
TIP: Windows includes the `tar` tool since Windows 10. Open a command line window and execute the above command. There are also several 3rd party un-archiving tools that support `.tar` archives.

== Directory Layout

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -618,7 +618,7 @@ Any metrics aggregations and/or filtering must be done on Grafana or the Prometh

=== Prometheus Configuration

Like the Prometheus Exporter, the `prometheus.yml` needs to be configurated for the Prometheus Server to ingest metrics.
Like the Prometheus Exporter, the `prometheus.yml` needs to be configured for the Prometheus Server to ingest metrics.
The difference is it must instead scrape the Metrics API with the `wt=prometheus` parameter directly from each host/port Solr is running on as in this example:

[source,plain]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ The authorization plugins that ship with Solr are:

== Audit Logging

Audit logging will record an audit trail of incoming reqests to your cluster, such as users being denied access to admin APIs.
Audit logging will record an audit trail of incoming requests to your cluster, such as users being denied access to admin APIs.
Learn more about audit logging and how to implement an audit logger plugin in the section xref:audit-logging.adoc[].

== IP Access Control
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1385,7 +1385,7 @@ Trailing wildcards are supported when copying from localand must be quoted.

Other examples are:

*Recursively copy a directory from local to ZooKeeper*: `bin/solr zk cp -r file:/apache/confgs/whatever/conf zk:/configs/myconf -z 111.222.333.444:2181`
*Recursively copy a directory from local to ZooKeeper*: `bin/solr zk cp -r file:/apache/configs/whatever/conf zk:/configs/myconf -z 111.222.333.444:2181`

*Copy a single file from ZooKeeper to local*: `bin/solr zk cp zk:/configs/myconf/managed_schema /configs/myconf/managed_schema -z 111.222.333.444:2181`

Expand Down Expand Up @@ -1834,7 +1834,7 @@ The `package` command allows you to interact with Solr's xref:configuration-guid

== Snapshots and Backups

The snapshots capablity of the CLI allows you to:
The snapshots capability of the CLI allows you to:

* Create snapshotted view of your index
* List all the available snapshots
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ bin/solr start -Dsolr.directoryFactory=HdfsDirectoryFactory

This command starts Solr using the defined JVM properties.

=== User-Managed Cluters and Single-Node Installations
=== User-Managed Clusters and Single-Node Installations

For user-managed clusters or single-node Solr installations, there are a few parameters you should modify before starting Solr.
These can be set in `solrconfig.xml` (more on that <<HdfsDirectoryFactory Parameters,below>>), or passed to the `bin/solr` script at startup.
Expand Down Expand Up @@ -246,7 +246,7 @@ s|Required |Default: none
The Kerberos principal that Solr should use to authenticate to secure Hadoop; the format of a typical Kerberos V5 principal is: `primary/instance@realm`.

== Update Log settings
When using HDFS to store Solr indexes, it is recommended to also store the transaction logs on HDFS. This can be done by using the `solr.HdfsUpdateLog` update log hander class.
When using HDFS to store Solr indexes, it is recommended to also store the transaction logs on HDFS. This can be done by using the `solr.HdfsUpdateLog` update log handler class.
The solrconfig.xml is often used to define an update log handler class name either using a variable reference or direct specification, for example:

[source,xml]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ For example, you might have a collection where the "country" field of each docum
A different collection might simply use a "hash" on the uniqueKey of each document to determine its Shard.

There is support for distributing both the index process and the queries automatically, and ZooKeeper provides failover and load balancing.
As well as supporting replication, automatic index spliting to shards, there is support for automatic routing of documents to specific shards by a sharding strategy.
As well as supporting replication, automatic index splitting into shards, there is support for automatic routing of documents to specific shards by a sharding strategy.
Additionally, every shard can have multiple replicas for additional robustness.

== Leaders and Replicas
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ Unless otherwise specified, SolrJ expects these URLs to point to the root Solr p

A few notable exceptions to this are described below:

- *Http2SolrClient* - Users of `Http2SolrClient` may choose to skip providing a root URL to their client, in favor of specifing the URL as a argument for the `Http2SolrClient.requestWithBaseUrl` method.
- *Http2SolrClient* - Users of `Http2SolrClient` may choose to skip providing a root URL to their client, in favor of specifying the URL as an argument for the `Http2SolrClient.requestWithBaseUrl` method.
Calling any other `request` methods on a URL-less `Http2SolrClient` will result in an `IllegalArgumentException`.
- *LBHttpSolrClient* and *LBHttp2SolrClient* - Solr's "load balancing" clients are frequently used to round-robin requests across a set of replicas or cores.
URLs are still expected to point to the Solr root (i.e. "/solr"), but to support this use-case the URLs are often supplemented by an additional parameter to specify the targeted core.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -422,7 +422,7 @@ Then, change or add this line to the file:
vm.swappiness = 1
----

Alternatively, you can change the setting temporarily by running this comamnd:
Alternatively, you can change the setting temporarily by running this command:

[source,bash]
----
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -454,9 +454,9 @@ Attempting to do so while Solr is running may result in inconsistent state and s
The VM properties `zkCredentialsInjector`, `zkACLProvider` and `zkCredentialsProvider`, included in the `SOLR_ZK_CREDS_AND_ACLS` environment variable in `solr.in.*`, control the conversion:

* The Credentials Injector reads the credentials and pass them to the Credentials Provider.
When omitted, the process will use no credentials (suitable for an unsecure configuration).
When omitted, the process will use no credentials (suitable for an insecure configuration).
* The Credentials Provider uses the credentials of the user with admin privileges on the nodes.
When omitted, the process will use no credentials (suitable for an unsecure configuration).
When omitted, the process will use no credentials (suitable for an insecure configuration).
* The ACL Provider will be used to compute the new ACLs.
When omitted, the process will set all permissions to all users, removing any security present.

Expand Down
Loading

0 comments on commit a30414d

Please sign in to comment.