You are getting search-engine tokenization: Elasticsearch is breaking words into tokens on separators. I had the same problem. You have to change the mappings on room_name. Here is the Python code that I used. The key is the "analyzer": "keyword" lines, which select the keyword analyzer — it uses the field's text verbatim and doesn't tokenize it. If you have already created your index, I'm pretty sure you need to delete it (and all of your data :-() and re-create it. This link might help as well:
# Create the POMA elasticsearch index with any specialized mappings.
def __createPomaIndex(elasticsearchClient):
    """Recreate the POMA index with keyword-analyzed mappings.

    Deletes any existing POMA index, then creates it with explicit mappings
    for fields whose values contain special characters ("/", "-", ...), so
    that exact-match term lookups work on them.

    :param elasticsearchClient: connected Elasticsearch client whose
        ``indices`` API is used to delete and create the index.
    """
    # Delete any existing index.  Statuses 400/404 are ignored so this is a
    # no-op when the index does not exist yet.
    elasticsearchClient.indices.delete(index=POMA_INDEX, ignore=[400, 404])

    # The default mappings are fine except for keywords that contain special
    # characters in them like "/", "-", ...  For those keywords,
    # elasticsearch will break the keyword up into separate segments because
    # of the special characters.  This screws up looking up an exact match
    # on the keyword.  To prevent this, we must change the analyzer used to
    # "keyword".  This analyzer will use the field's text verbatim.  See:
    # https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-analyzers.html
    #
    # We must also set the fielddata field to true so that we can use the
    # field in term searches.
    mapping = {
        "mappings": {
            "properties": {
                "system": {
                    "properties": {
                        "cgroup": {
                            "properties": {
                                "memory": {
                                    "properties": {
                                        "path": {
                                            "type": "text",
                                            "analyzer": "keyword",
                                            "fielddata": True,
                                        }
                                    }
                                }
                            }
                        },
                        "Device": {
                            "properties": {
                                "SerialNumber": {
                                    "type": "text",
                                    "analyzer": "keyword",
                                    "fielddata": True,
                                }
                            }
                        },
                    }
                }
            }
        }
    }

    # Create the index and report the outcome.
    response = elasticsearchClient.indices.create(index=POMA_INDEX, body=mapping)
    # .get() avoids the double lookup and an "== True" comparison; the key
    # is absent on error responses.
    if response.get("acknowledged"):
        print("Updated mapping for index '{}'.".format(response["index"]))
    elif "error" in response:
        print("*** ERROR: {}".format( response["error"]["root_cause"]))
        print("    Type: {}".format(response["error"]["type"]))