analyzerConfig
Lucene Analyzer Schema
|
LuceneTextAnalyzer schema for tokenization (JSON-encoded); a simplified example follows the default value below
type: string
default value: '{ "analyzers": [{ "name": "StdTokLowerStop","charFilters": [ { "type": "htmlstrip" } ],"tokenizer": { "type": "standard" },"filters": [{ "type": "lowercase" },{ "type": "KStem" },{ "type": "length", "min": "2", "max": "32767" },{ "type": "fusionstop", "ignoreCase": "true", "format": "snowball", "words": "org/apache/lucene/analysis/snowball/english_stop.txt" }] }],"fields": [{ "regex": ".+", "analyzer": "StdTokLowerStop" } ]} '
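As a sketch of how this schema can be customized, the following minimal analyzer tokenizes on whitespace and lowercases, with no stemming or stopword removal; the analyzer name 'WhitespaceLower' is an illustrative choice, not part of the default:

    {
      "analyzers": [
        {
          "name": "WhitespaceLower",
          "tokenizer": { "type": "whitespace" },
          "filters": [ { "type": "lowercase" } ]
        }
      ],
      "fields": [ { "regex": ".+", "analyzer": "WhitespaceLower" } ]
    }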
|
dataFormat
Data format
|
Spark-compatible format in which the training data is provided: 'solr', 'hdfs', 'file', or 'parquet'
type: string
default value: 'solr'
enum: {
solr
hdfs
file
parquet
}
|
fieldToVectorize
Field to Vectorize
required
|
Solr field containing text training data. Data from multiple fields with different weights can be combined by specifying them as field1:weight1,field2:weight2, etc. (see the example below).
type: string
minLength: 1
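For example, to weight a title field twice as heavily as a body field (the field names here are illustrative, not defaults):

    "fieldToVectorize": "title_txt:2.0,body_txt:1.0"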
|
id
Spark Job ID
required
|
The ID for this Spark job. Used in the API to reference this job. Allowed characters: a-z, A-Z, 0-9, dash (-) and underscore (_). Maximum length: 63 characters.
type: string
maxLength: 63
pattern: ^[A-Za-z0-9_\-]+$
|
maxDF
Max Term Document Frequency
|
Terms are kept only if they occur in no more than this number of documents (when the value is > 1.0), or in no more than this fraction of documents (when the value is <= 1.0)
type: number
default value: '1.0'
|
minDF
Minimum Term Document Frequency
|
Terms are kept only if they occur in at least this number of documents (when the value is > 1.0), or in at least this fraction of documents (when the value is <= 1.0); see the example below
type: number
default value: '0.0'
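A sketch of the two interpretations, with illustrative values: a value above 1.0 is an absolute document count, while a value at or below 1.0 is a fraction of the corpus. The settings below keep only terms that appear in at least 5 documents and in no more than 75% of all documents:

    {
      "minDF": 5.0,
      "maxDF": 0.75
    }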
|
minSparkPartitions
Minimum Number of Spark Partitions
|
Minimum number of Spark partitions for the training job.
type: integer
default value: '200'
exclusiveMinimum: false
minimum: 1
|
modelId
Model ID
|
Identifier for the model to be trained; uses the supplied Spark Job ID if not provided.
type: string
minLength: 1
|
norm
Vector normalization
|
p-norm with which to normalize vectors (choose -1 to turn normalization off); see the formula below
type: integer
default value: '2'
enum: {
-1
0
1
2
}
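For reference, the standard p-norm definition (for p >= 1) and the resulting normalization are:

    \|v\|_p = \Big( \sum_i |v_i|^p \Big)^{1/p}, \qquad v \mapsto \frac{v}{\|v\|_p}

With the default p = 2, each feature vector is scaled to unit Euclidean length.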
|
numRelatedTerms
Number of Related Words
|
For each collection of input words, find this many related words using the word2vec model
type: integer
default value: '10'
exclusiveMinimum: false
minimum: 1
|
outputCollection
Output Collection
required
|
Solr Collection in which to store the model-labeled data
type: string
|
outputField
Output Field
|
Solr field that will contain the terms the word2vec model considers related to the input
type: string
default value: 'related_terms_txt'
|
overwriteExistingModel
Overwrite existing model
|
If a model with this ID already exists in the model store, overwrite it when this job runs
type: boolean
default value: 'true'
|
overwriteOutput
Overwrite Output
|
Overwrite output collection
type: boolean
default value: 'true'
|
predictedLabelField
Word2Vec Feature Field
|
Solr field that will contain the vector features when the word2vec model is applied to documents
type: string
default value: 'w2vFeatures'
|
randomSeed
Random seed
|
Seed used for deterministic pseudorandom number generation
type: integer
default value: '1234'
|
serializeAsMleap
Serialize as MLeap Bundle
|
Serialize the output model as an MLeap bundle
type: boolean
default value: 'true'
|
sourceFields
Fields to Load
|
Solr fields to load (comma-delimited). Leave empty to allow the job to select the required fields to load at runtime.
type: string
|
sparkConfig
Spark Settings
|
Spark configuration settings (see the example below).
type: array of object
object attributes: {
key
(required)
: {
display name: Parameter Name
type: string
}
value
: {
display name: Parameter Value
type: string
}
}
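For example, the following sketch sets two common Spark properties; the values are illustrative and depend on the resources of your cluster:

    "sparkConfig": [
      { "key": "spark.executor.memory", "value": "4g" },
      { "key": "spark.cores.max", "value": "8" }
    ]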
|
stopwordsList
List of stopwords
|
Stopwords defined in the Lucene analyzer config
type: array of string
|
trainingCollection
Training Collection
required
|
Solr Collection containing labeled training data
type: string
minLength: 1
|
trainingDataFilterQuery
Training data filter query
|
Solr query to use when loading training data (see the example below)
type: string
default value: '*:*'
minLength: 3
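For instance, to restrict training to English documents that actually contain text; the field names lang_s and body_txt here are hypothetical:

    "trainingDataFilterQuery": "lang_s:en AND body_txt:[* TO *]"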
|
trainingDataFrameConfigOptions
Dataframe Config Options
|
Additional Spark dataframe loading configuration options
type: object
object attributes: {
}
|
trainingDataSamplingFraction
Training data sampling fraction
|
Fraction of the training data to use
type: number
default value: '1.0'
exclusiveMaximum: false
maximum: 1.0
|
type
Spark Job Type
required
|
type: string
default value: 'word2vec'
enum: {
word2vec
}
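Putting the required parameters together, a minimal word2vec job configuration might look like the sketch below; the job ID, collection names, and field name are illustrative, and every omitted parameter falls back to its default:

    {
      "id": "related-terms-w2v",
      "type": "word2vec",
      "trainingCollection": "products",
      "fieldToVectorize": "description_txt",
      "outputCollection": "products"
    }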
|
uidField
ID Field Name
|
Field containing the unique ID for each document
type: string
minLength: 1
|
w2vDimension
Embedding Dimension
|
Dimensionality of the word vectors used to represent text
type: integer
default value: '50'
exclusiveMinimum: false
minimum: 0
|
w2vMaxIter
Max Iterations
|
Maximum number of iterations of the word2vec training
type: integer
default value: '1'
|
w2vMaxSentenceLength
Max Sentence Length
|
Sets the maximum length (in words) of each sentence in the input data. Any sentence longer than this threshold will be divided into chunks of up to `maxSentenceLength` size.
type: integer
default value: '1000'
exclusiveMinimum: false
minimum: 3
|
w2vStepSize
Step Size
|
Initial learning rate (step size) for word2vec training convergence (change at your own peril)
type: number
default value: '0.025'
exclusiveMinimum: false
minimum: 0.005
|
w2vWindowSize
Window Size
|
The window size (context words from [-window, window]) for word2vec; a combined tuning example follows this entry
type: integer
default value: '5'
exclusiveMinimum: false
minimum: 3
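As a sketch, the word2vec hyperparameters above can be tuned together; the values below are illustrative starting points, not recommendations from this schema:

    {
      "w2vDimension": 100,
      "w2vMaxIter": 5,
      "w2vWindowSize": 5,
      "w2vStepSize": 0.025,
      "w2vMaxSentenceLength": 1000
    }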
|
withIdf
IDF Weighting
|
Weight vector components based on inverse document frequency
type: boolean
default value: 'true'
|