Query caching #15
1 changed file with 58 additions and 35 deletions
@@ -179,10 +179,22 @@ func (s *Server) Search(ctx context.Context, in *pb.SearchRequest) (*pb.Outputs,
 	var pageSize = 10
No, on second thought, I don't think so. We accept a single ES index as an argument, and in all our use cases I believe we only ever have to deal with one.
Done.
 	var orderBy []orderField
 	var searchIndices []string
+	var searchResult *elastic.SearchResult = nil
 	client := s.EsClient
 	searchIndices = make([]string, 0, 1)
 	searchIndices = append(searchIndices, s.Args.EsIndex)
 
+	cacheHit := false
+	var cachedRecords []*record
+	/*
+		TODO: cache based on search request params, including the
+		from value and the number of results. When another search
+		request comes in with the same params and the same or an
+		increased offset (which we currently don't even use?),
+		that will be a cache hit.
+	*/
+
+	if !cacheHit {
 	q := elastic.NewBoolQuery()
 
 	err := s.checkQuery(in)
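The TODO above describes an offset-aware cache. One way to read it: key on the query text, cache a prefix of the result set, and serve any window that prefix already covers, so a repeat query with the same params and the same or a higher offset is a hit. Below is a minimal standalone sketch under those assumptions; searchCache, lookup, and store are hypothetical names, not existing code, and a real version would also need locking, eviction, and invalidation when the index is rewritten.

package main

import "fmt"

// record stands in for the server's record type (stub for this sketch).
type record struct{ hit string }

// searchCache maps query text to the prefix of its result set fetched so
// far. A bare process-local map is assumed; a real version needs a mutex,
// an eviction policy, and invalidation whenever the index is rewritten.
var searchCache = map[string][]*record{}

// lookup returns the requested window when the cached prefix already covers
// it, so a repeat query with the same or an increased offset is a hit as
// long as an earlier request cached enough records.
func lookup(query string, from, size int) ([]*record, bool) {
	recs, ok := searchCache[query]
	if !ok || from+size > len(recs) {
		return nil, false
	}
	return recs[from : from+size], true
}

// store remembers the result-set prefix fetched for a query, keeping the
// longest prefix seen so far.
func store(query string, prefix []*record) {
	if len(prefix) > len(searchCache[query]) {
		searchCache[query] = prefix
	}
}

func main() {
	store("test query", []*record{{"a"}, {"b"}, {"c"}})
	if recs, ok := lookup("test query", 1, 2); ok {
		fmt.Printf("cache hit: %d records\n", len(recs)) // prints: cache hit: 2 records
	}
}

For the increased-offset case to actually hit, the miss path would have to query ES from offset 0 through from+size rather than just the requested page; the simpler alternative is to key on (query, from, size) and only serve exact repeats.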
@@ -202,7 +214,7 @@ func (s *Server) Search(ctx context.Context, in *pb.SearchRequest) (*pb.Outputs,
 		search = search.Sort(x.Field, x.IsAsc)
 	}
 
-	searchResult, err := search.Do(ctx) // execute
+	searchResult, err = search.Do(ctx) // execute
 	if err != nil && elastic.IsNotFound(err) {
 		log.Println("Index returned 404! Check writer. Index: ", searchIndices)
 		return &pb.Outputs{}, nil
@@ -215,7 +227,13 @@ func (s *Server) Search(ctx context.Context, in *pb.SearchRequest) (*pb.Outputs,
 
 	log.Printf("%s: found %d results in %dms\n", in.Text, len(searchResult.Hits.Hits), searchResult.TookInMillis)
 
-	txos, extraTxos, blocked := s.postProcessResults(ctx, client, searchResult, in, pageSize, from, searchIndices)
+		cachedRecords = make([]*record, 0, 0)
+	} else {
+		//TODO fill cached records here
+		cachedRecords = make([]*record, 0, 0)
+	}
+
+	txos, extraTxos, blocked := s.postProcessResults(ctx, client, searchResult, in, pageSize, from, searchIndices, cachedRecords)
 
 	t1 := time.Now()
 
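Against the earlier sketch, the probe that sets cacheHit and the "//TODO fill cached records here" branch could end up looking roughly like this fragment; hedged, since lookup and store are the hypothetical helpers from that sketch, and from and pageSize are the values already in scope in Search:

// Before the if !cacheHit block: probe the hypothetical cache.
if recs, ok := lookup(in.Text, from, pageSize); ok {
	cacheHit = true
	cachedRecords = recs
}

// ... and in the miss path, after records are extracted from searchResult,
// remember them for the next request with the same query:
// store(in.Text, records)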
@@ -272,13 +290,15 @@ func (s *Server) postProcessResults(
 	in *pb.SearchRequest,
 	pageSize int,
 	from int,
-	searchIndices []string) ([]*pb.Output, []*pb.Output, []*pb.Blocked) {
+	searchIndices []string,
+	cachedRecords []*record) ([]*pb.Output, []*pb.Output, []*pb.Blocked) {
 	var txos []*pb.Output
 	var records []*record
 	var blockedRecords []*record
 	var blocked []*pb.Blocked
 	var blockedMap map[string]*pb.Blocked
 
+	if len(cachedRecords) == 0 {
 	records = make([]*record, 0, searchResult.TotalHits())
 
 	var r record
@@ -287,6 +307,9 @@ func (s *Server) postProcessResults(
 			records = append(records, &t)
 		}
 	}
+	} else {
+		records = cachedRecords
+	}
 
 	//printJsonFullResults(searchResult)
 	records, blockedRecords, blockedMap = removeBlocked(records)
Does this need fixing?
I generally don't like swallowing errors; at the very least you should log it.
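If the empty-result shortcut for a missing index stays, one way to stop dropping the error entirely, per the comment above; this is a sketch against the diff, reusing only the log package and elastic.IsNotFound that already appear there:

if err != nil {
	if elastic.IsNotFound(err) {
		// Keep returning empty outputs for a missing index, but log the
		// underlying error instead of discarding it silently.
		log.Printf("index %v returned 404, check writer: %v", searchIndices, err)
		return &pb.Outputs{}, nil
	}
	return nil, err // anything else propagates to the caller
}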