Query caching #15

Merged
jeffreypicard merged 6 commits from query-caching into master 2021-10-05 00:05:44 +02:00
Showing only changes of commit 076adcca39

@@ -179,10 +179,22 @@ func (s *Server) Search(ctx context.Context, in *pb.SearchRequest) (*pb.Outputs,
var pageSize = 10
lyoshenka commented 2021-10-04 20:53:46 +02:00 (Migrated from github.com)

does this need fixing?

lyoshenka commented 2021-10-04 20:55:16 +02:00 (Migrated from github.com)

I generally don't like swallowing errors. At the very least you should log it.

jeffreypicard commented 2021-10-04 21:59:06 +02:00 (Migrated from github.com)

No, on second thought, I don't think so. We accept a single ES index as an argument, and in all our use cases I believe we only ever have to deal with one.

jeffreypicard commented 2021-10-05 00:02:25 +02:00 (Migrated from github.com)

Done.

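
A minimal sketch of the logging lyoshenka is asking for, assuming the swallowed error comes from the Elasticsearch search call; the package name and the doSearch helper are illustrative, not code from this commit:

```go
package search

import (
	"context"
	"log"

	elastic "github.com/olivere/elastic/v7"
)

// doSearch runs the query and logs a failure instead of silently
// discarding it, per the review suggestion above.
func doSearch(ctx context.Context, search *elastic.SearchService, indices []string) (*elastic.SearchResult, error) {
	res, err := search.Do(ctx)
	if err != nil {
		// Log before returning so failures stay visible even if the
		// caller ends up swallowing the error.
		log.Printf("search failed (indices: %v): %v", indices, err)
		return nil, err
	}
	return res, nil
}
```
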
var orderBy []orderField
var searchIndices []string
+ var searchResult *elastic.SearchResult = nil
client := s.EsClient
searchIndices = make([]string, 0, 1)
searchIndices = append(searchIndices, s.Args.EsIndex)
+ cacheHit := false
+ var cachedRecords []*record
+ /*
+ TODO: cache based on search request params,
+ including the from value and the number of results.
+ When another search request comes in with the same search params
+ and the same or an increased offset (which we currently don't even use?),
+ that will be a cache hit.
+ */
+ if !cacheHit {
q := elastic.NewBoolQuery()
err := s.checkQuery(in)
@@ -202,7 +214,7 @@ func (s *Server) Search(ctx context.Context, in *pb.SearchRequest) (*pb.Outputs,
search = search.Sort(x.Field, x.IsAsc)
}
- searchResult, err := search.Do(ctx) // execute
+ searchResult, err = search.Do(ctx) // execute
if err != nil && elastic.IsNotFound(err) {
log.Println("Index returned 404! Check writer. Index: ", searchIndices)
return &pb.Outputs{}, nil
@@ -215,7 +227,13 @@ func (s *Server) Search(ctx context.Context, in *pb.SearchRequest) (*pb.Outputs,
log.Printf("%s: found %d results in %dms\n", in.Text, len(searchResult.Hits.Hits), searchResult.TookInMillis)
- txos, extraTxos, blocked := s.postProcessResults(ctx, client, searchResult, in, pageSize, from, searchIndices)
+ cachedRecords = make([]*record, 0, 0)
+ } else {
+ //TODO fill cached records here
+ cachedRecords = make([]*record, 0, 0)
+ }
+ txos, extraTxos, blocked := s.postProcessResults(ctx, client, searchResult, in, pageSize, from, searchIndices, cachedRecords)
t1 := time.Now()
@@ -272,13 +290,15 @@ func (s *Server) postProcessResults(
in *pb.SearchRequest,
pageSize int,
from int,
- searchIndices []string) ([]*pb.Output, []*pb.Output, []*pb.Blocked) {
+ searchIndices []string,
+ cachedRecords []*record) ([]*pb.Output, []*pb.Output, []*pb.Blocked) {
var txos []*pb.Output
var records []*record
var blockedRecords []*record
var blocked []*pb.Blocked
var blockedMap map[string]*pb.Blocked
+ if len(cachedRecords) == 0 { // was "< 0", which can never be true since len() is non-negative
records = make([]*record, 0, searchResult.TotalHits())
var r record
@@ -287,6 +307,9 @@ func (s *Server) postProcessResults(
records = append(records, &t)
}
}
+ } else {
+ records = cachedRecords
+ }
//printJsonFullResults(searchResult)
records, blockedRecords, blockedMap = removeBlocked(records)
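
For reference, one possible shape for the cache described in the TODO at the top of Search. This is a sketch under stated assumptions: the cacheKey struct, its fields, and the window-containment rule are illustrative, not part of this PR:

```go
package main

import "fmt"

// cacheKey is a hypothetical key for the query cache sketched in the
// TODO: the search params plus the paging window they cover.
type cacheKey struct {
	params string // canonical serialization of the SearchRequest fields
	from   int    // offset of the first cached result
	size   int    // number of cached results
}

// hits reports whether a cached window can serve a new request: same
// params, and the requested window [from, from+size) falls inside the
// cached one, covering the "same or increased offset" case.
func (k cacheKey) hits(params string, from, size int) bool {
	return k.params == params && from >= k.from && from+size <= k.from+k.size
}

func main() {
	cached := cacheKey{params: "text=claim", from: 0, size: 20}
	fmt.Println(cached.hits("text=claim", 10, 10)) // true: inside the cached window
	fmt.Println(cached.hits("text=claim", 15, 10)) // false: extends past the window
}
```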