Implementing a deduplicated query on two fields in ES 7.0 with Java, outputting the result as a Map
You can use Elasticsearch's aggregation feature to implement a deduplicated query on two fields. The steps are as follows:
1. Build the query condition, using a bool query to match both fields at the same time, for example:
```
{
  "query": {
    "bool": {
      "must": [
        { "match": { "field1": "value1" } },
        { "match": { "field2": "value2" } }
      ]
    }
  }
}
```
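If you build the request with the Java high-level REST client instead of raw JSON, the equivalent bool query can be sketched as follows (field1/field2 and value1/value2 are the same placeholders as in the JSON above):
```
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

// Equivalent of the bool/must JSON above; field names and values are placeholders
BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery()
        .must(QueryBuilders.matchQuery("field1", "value1"))
        .must(QueryBuilders.matchQuery("field2", "value2"));
```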
2. Use an aggregation to group by the two fields and deduplicate, for example:
```
{
  "aggs": {
    "distinct_field1": {
      "terms": {
        "field": "field1",
        "size": 1000
      },
      "aggs": {
        "distinct_field2": {
          "terms": {
            "field": "field2",
            "size": 1000
          }
        }
      }
    }
  }
}
```
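The same aggregation tree can be built programmatically; a minimal sketch with AggregationBuilders, reusing the aggregation names and the size of 1000 from the JSON above, might look like this:
```
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

// Nested terms aggregation: group by field1, then by field2 inside each field1 bucket
TermsAggregationBuilder agg = AggregationBuilders.terms("distinct_field1").field("field1")
        .size(1000)
        .subAggregation(AggregationBuilders.terms("distinct_field2").field("field2")
                .size(1000));
```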
3. Parse the aggregation result and output the deduplicated data as a Map as needed.
A complete Java example is shown below:
```
import org.apache.http.HttpHost;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.metrics.TopHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class EsQueryDemo {

    public static void main(String[] args) throws IOException {
        // Create the client connection (adjust host/port to your cluster)
        RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")));

        // Build the query condition
        BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery()
                .must(QueryBuilders.matchQuery("field1", "value1"))
                .must(QueryBuilders.matchQuery("field2", "value2"));

        // Build the nested terms aggregation, keeping one sample document per bucket
        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder()
                .query(queryBuilder)
                .size(0) // only the aggregation result is needed, not the raw hits
                .aggregation(AggregationBuilders.terms("distinct_field1").field("field1")
                        .size(1000)
                        .subAggregation(AggregationBuilders.terms("distinct_field2").field("field2")
                                .size(1000)
                                .subAggregation(AggregationBuilders.topHits("top_hit").size(1))));

        // Build the search request
        SearchRequest searchRequest = new SearchRequest("index_name");
        searchRequest.source(sourceBuilder);

        // Execute the search
        SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);

        // Parse the aggregation result into a Map keyed by field1; each value holds
        // one sample document per distinct field2 value under that field1
        Map<String, List<Map<String, Object>>> result = new HashMap<>();
        Terms terms1 = searchResponse.getAggregations().get("distinct_field1");
        for (Terms.Bucket bucket1 : terms1.getBuckets()) {
            String field1 = bucket1.getKeyAsString();
            List<Map<String, Object>> list = new ArrayList<>();
            result.put(field1, list);
            Terms terms2 = bucket1.getAggregations().get("distinct_field2");
            for (Terms.Bucket bucket2 : terms2.getBuckets()) {
                TopHits topHits = bucket2.getAggregations().get("top_hit");
                SearchHit hit = topHits.getHits().getAt(0);
                list.add(new HashMap<>(hit.getSourceAsMap()));
            }
        }

        // Print the result
        result.forEach((key, value) -> {
            System.out.println(key);
            value.forEach(System.out::println);
        });

        // Close the client connection
        client.close();
    }
}
```
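Note that a terms aggregation returns at most `size` buckets (1000 in the example above), so combinations beyond that limit are silently dropped. If every distinct field1/field2 combination must be enumerated, ES 7.x also provides the composite aggregation, which can be paged exhaustively via its after key. Below is a minimal, hypothetical sketch under the same placeholder index and field names; it assumes keyword-type fields and only collects the combination keys, without sample documents:
```
import org.apache.http.HttpHost;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class EsCompositeDedupDemo {

    public static void main(String[] args) throws IOException {
        RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")));

        // One terms source per field; the composite key is the combination of both
        List<CompositeValuesSourceBuilder<?>> sources = new ArrayList<>();
        sources.add(new TermsValuesSourceBuilder("field1").field("field1"));
        sources.add(new TermsValuesSourceBuilder("field2").field("field2"));
        CompositeAggregationBuilder composite =
                AggregationBuilders.composite("dedup", sources).size(1000);

        List<Map<String, Object>> distinctPairs = new ArrayList<>();
        Map<String, Object> afterKey = null;
        while (true) {
            if (afterKey != null) {
                composite.aggregateAfter(afterKey); // resume from the previous page
            }
            SearchRequest request = new SearchRequest("index_name")
                    .source(new SearchSourceBuilder().size(0).aggregation(composite));
            SearchResponse response = client.search(request, RequestOptions.DEFAULT);

            CompositeAggregation agg = response.getAggregations().get("dedup");
            if (agg.getBuckets().isEmpty()) {
                break; // no more field1/field2 combinations
            }
            for (CompositeAggregation.Bucket bucket : agg.getBuckets()) {
                // bucket.getKey() is a Map such as {field1=..., field2=...}
                distinctPairs.add(bucket.getKey());
            }
            afterKey = agg.afterKey();
        }

        distinctPairs.forEach(System.out::println);
        client.close();
    }
}
```
Each loop iteration fetches one page of up to 1000 distinct combinations and stops when a page comes back empty, so the result is complete regardless of field cardinality.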