Pull request: all: rm var shadowing, vol. 2
Updates #2803.

Squashed commit of the following:

commit bf7186378164f19ea9e21ec832526792efa2f9c3
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Thu Mar 11 19:48:17 2021 +0300

    all: rm var shadowing, vol. 2
Parent: dfdbfee4fd
Commit: 4cf44dd1d4
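All of the hunks below follow the same recipe: wherever a `:=` inside a nested block re-declared a name that already exists in an enclosing scope, the short declaration becomes a plain `=`, with any genuinely new variables pre-declared via `var`. As a quick, repo-independent illustration of why shadowing is worth removing (the names here are made up, not taken from AdGuard Home):

    package main

    import (
        "errors"
        "fmt"
    )

    // work is a hypothetical helper that always fails.
    func work() error { return errors.New("boom") }

    func main() {
        var err error
        if true {
            // `:=` declares a brand-new err that shadows the outer one,
            // so the outer err is still nil after this block.
            err := work()
            fmt.Println("inner:", err)
        }

        fmt.Println("outer:", err) // prints "outer: <nil>" even though work failed
    }

Shadowed names like this are easy to misread and are exactly what shadow linters flag; the changes below make each assignment target unambiguous.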
@@ -850,7 +850,7 @@ func New(c *Config, blockFilters []Filter) *DNSFilter {
     d.BlockedServices = bsvcs
 
     if blockFilters != nil {
-        err := d.initFiltering(nil, blockFilters)
+        err = d.initFiltering(nil, blockFilters)
         if err != nil {
             log.Error("Can't initialize filtering subsystem: %s", err)
             d.Close()
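This is the simplest case: since the new code assigns with `=`, an `err` must already be declared earlier in New, so the old `:=` only created a shadow inside the `if` block. A hedged sketch of the before/after shape (initSomething is a stand-in for d.initFiltering; the surrounding function is paraphrased, not copied from the repo):

    package main

    import (
        "errors"
        "fmt"
    )

    // initSomething is a hypothetical stand-in for d.initFiltering.
    func initSomething() error { return errors.New("init failed") }

    func newThing(enabled bool) error {
        var err error
        if enabled {
            // Old shape: `err := initSomething()` declared a second err here.
            // New shape: plain assignment reuses the err declared above.
            err = initSomething()
            if err != nil {
                return fmt.Errorf("initializing: %w", err)
            }
        }

        return err
    }

    func main() {
        fmt.Println(newThing(true))
    }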
@@ -229,7 +229,9 @@ func (c *sbCtx) processTXT(resp *dns.Msg) (bool, [][]byte) {
         if !matched {
             var hash32 [32]byte
             copy(hash32[:], hash)
-            hashHost, ok := c.hashToHost[hash32]
+
+            var hashHost string
+            hashHost, ok = c.hashToHost[hash32]
             if ok {
                 log.Debug("%s: matched %s by %s/%s", c.svc, c.host, hashHost, t)
                 matched = true
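Here `:=` was re-declaring both `hashHost` and `ok`, even though `ok` must already exist in the surrounding scope (otherwise the new `hashHost, ok = ...` would not compile). The fix pre-declares the one genuinely new variable and switches to assignment. A minimal sketch of that comma-ok map lookup without shadowing (the map contents and names are illustrative):

    package main

    import "fmt"

    func main() {
        hashToHost := map[[32]byte]string{
            {1, 2, 3}: "example.org",
        }

        var hash32 [32]byte
        copy(hash32[:], []byte{1, 2, 3})

        // Assume ok is already in scope, as it is in processTXT.
        var ok bool

        // Pre-declare the new value so the lookup assigns instead of redeclaring.
        var hashHost string
        hashHost, ok = hashToHost[hash32]
        if ok {
            fmt.Println("matched", hashHost)
        }
    }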
@@ -280,7 +280,8 @@ func decodeResultRules(dec *json.Decoder, ent *logEntry) {
 
     i := 0
     for {
-        keyToken, err := dec.Token()
+        var keyToken json.Token
+        keyToken, err = dec.Token()
         if err != nil {
             if err != io.EOF {
                 log.Debug("decodeResultRules err: %s", err)
@@ -405,7 +406,8 @@ func decodeResultDNSRewriteResult(dec *json.Decoder, ent *logEntry) {
     // code for a longer time than planned.
     switch key {
     case "RCode":
-        vToken, err := dec.Token()
+        var vToken json.Token
+        vToken, err = dec.Token()
         if err != nil {
             if err != io.EOF {
                 log.Debug("decodeResultDNSRewriteResult err: %s", err)
@@ -418,7 +420,8 @@ func decodeResultDNSRewriteResult(dec *json.Decoder, ent *logEntry) {
             ent.Result.DNSRewriteResult = &dnsfilter.DNSRewriteResult{}
         }
 
-        if n, ok := vToken.(json.Number); ok {
+        var n json.Number
+        if n, ok = vToken.(json.Number); ok {
             rcode64, _ := n.Int64()
             ent.Result.DNSRewriteResult.RCode = rules.RCode(rcode64)
         }
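The same idea applied to a comma-ok type assertion: `n, ok := vToken.(json.Number)` both introduced `n` and shadowed `ok`, so `n` is now pre-declared and the assertion assigns. A small sketch of that shape (the token value is made up):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        var vToken interface{} = json.Number("3")

        // Assume ok already exists in the surrounding scope.
        var ok bool

        // Pre-declare n so the assertion assigns instead of redeclaring.
        var n json.Number
        if n, ok = vToken.(json.Number); ok {
            rcode64, _ := n.Int64()
            fmt.Println("rcode:", rcode64)
        }
    }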
@@ -150,10 +150,9 @@ func (l *queryLog) parseSearchCriteria(q url.Values, name string, ct criteriaTyp
 }
 
 // parseSearchParams - parses "searchParams" from the HTTP request's query string
-func (l *queryLog) parseSearchParams(r *http.Request) (*searchParams, error) {
-    p := newSearchParams()
+func (l *queryLog) parseSearchParams(r *http.Request) (p *searchParams, err error) {
+    p = newSearchParams()
 
-    var err error
     q := r.URL.Query()
     olderThan := q.Get("older_than")
     if len(olderThan) != 0 {
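Rather than keeping a separate `var err error`, the new signature names both return values, so `p` and `err` exist from the first line of the function and every later call can assign to them with `=`. A hedged sketch of the same shape with placeholder types (not the real searchParams):

    package main

    import (
        "fmt"
        "net/url"
    )

    // parseThing mirrors the parseSearchParams shape: naming the returns means
    // p and err are declared up front, so the body assigns instead of
    // redeclaring with `:=`.
    func parseThing(rawQuery string) (p url.Values, err error) {
        p, err = url.ParseQuery(rawQuery)
        if err != nil {
            return nil, fmt.Errorf("parsing query: %w", err)
        }

        return p, nil
    }

    func main() {
        p, err := parseThing("older_than=1&limit=20")
        fmt.Println(p, err)
    }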
@@ -163,11 +162,14 @@ func (l *queryLog) parseSearchParams(r *http.Request) (*searchParams, error) {
         }
     }
 
-    if limit, err := strconv.ParseInt(q.Get("limit"), 10, 64); err == nil {
-        p.limit = int(limit)
+    var limit64 int64
+    if limit64, err = strconv.ParseInt(q.Get("limit"), 10, 64); err == nil {
+        p.limit = int(limit64)
     }
-    if offset, err := strconv.ParseInt(q.Get("offset"), 10, 64); err == nil {
-        p.offset = int(offset)
+
+    var offset64 int64
+    if offset64, err = strconv.ParseInt(q.Get("offset"), 10, 64); err == nil {
+        p.offset = int(offset64)
 
         // If we don't use "olderThan" and use offset/limit instead, we should change the default behavior
         // and scan all log records until we found enough log entries
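With `err` now a named return, the old `if limit, err := strconv.ParseInt(...)` form would shadow it, so the parsed values get pre-declared int64 intermediates and `err` is assigned directly. A compact sketch of that parsing step (plumbing and error handling simplified):

    package main

    import (
        "fmt"
        "strconv"
    )

    func parseLimits(limitStr, offsetStr string) (limit, offset int, err error) {
        // Pre-declare the 64-bit intermediates so ParseInt assigns to the
        // named err instead of shadowing it inside the if statements.
        var limit64 int64
        if limit64, err = strconv.ParseInt(limitStr, 10, 64); err == nil {
            limit = int(limit64)
        }

        var offset64 int64
        if offset64, err = strconv.ParseInt(offsetStr, 10, 64); err == nil {
            offset = int(offset64)
        }

        return limit, offset, err
    }

    func main() {
        fmt.Println(parseLimits("20", "100"))
    }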
@@ -180,7 +182,9 @@ func (l *queryLog) parseSearchParams(r *http.Request) (*searchParams, error) {
     }
 
     for k, v := range paramNames {
-        ok, c, err := l.parseSearchCriteria(q, k, v)
+        var ok bool
+        var c searchCriteria
+        ok, c, err = l.parseSearchCriteria(q, k, v)
         if err != nil {
             return nil, err
         }
@@ -225,7 +225,8 @@ func TestQLogFile_SeekTS_bad(t *testing.T) {
         t.Run(tc.name, func(t *testing.T) {
             assert.NotEqualValues(t, 0, tc.ts)
 
-            _, depth, err := q.SeekTS(tc.ts)
+            var depth int
+            _, depth, err = q.SeekTS(tc.ts)
             assert.NotEmpty(t, l.num)
             require.NotNil(t, err)
             if tc.leq {
@@ -112,11 +112,11 @@ func TestQLogReader_Seek(t *testing.T) {
 
     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {
-            timestamp, err := time.Parse(time.RFC3339Nano, tc.time)
+            ts, err := time.Parse(time.RFC3339Nano, tc.time)
             require.Nil(t, err)
 
-            err = r.SeekTS(timestamp.UnixNano())
-            assert.True(t, errors.Is(err, tc.want))
+            err = r.SeekTS(ts.UnixNano())
+            assert.True(t, errors.Is(err, tc.want), err)
         })
     }
 }
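Besides the rename, this hunk passes `err` as testify's optional msgAndArgs argument, so a failing `errors.Is` assertion reports which error was actually returned instead of a bare "should be true". A small, hypothetical example of that usage (errWant and the wrapping are made up):

    package qlog_test

    import (
        "errors"
        "fmt"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    var errWant = errors.New("want")

    func TestSeek(t *testing.T) {
        err := fmt.Errorf("seeking: %w", errWant)

        // If this assertion ever fails, the trailing err is printed in the
        // failure message along with the usual "should be true" text.
        assert.True(t, errors.Is(err, errWant), err)
    }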
@@ -146,7 +146,7 @@ func TestQLogReader_ReadNext(t *testing.T) {
             require.Nil(t, err)
 
             for i := 1; i < tc.start; i++ {
-                _, err := r.ReadNext()
+                _, err = r.ReadNext()
                 require.Nil(t, err)
             }
 
@@ -118,8 +118,9 @@ func (l *queryLog) searchFiles(params *searchParams) ([]*logEntry, time.Time, in
     // The idea is to make search calls faster so that the UI could handle it and show something
     // This behavior can be overridden if "maxFileScanEntries" is set to 0
     for total < params.maxFileScanEntries || params.maxFileScanEntries <= 0 {
-        entry, ts, err := l.readNextEntry(r, params)
+        var entry *logEntry
+        var ts int64
+        entry, ts, err = l.readNextEntry(r, params)
         if err == io.EOF {
             // there's nothing to read anymore
             break
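The last hunk is the same pattern inside a read loop: `entry` and `ts` are fresh on every iteration, but `err` is the function-level variable, so the new values are pre-declared with `var` and the call assigns. A compact sketch of that loop shape (the reader and the line-based entries are stand-ins for readNextEntry):

    package main

    import (
        "bufio"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        r := bufio.NewReader(strings.NewReader("one\ntwo\n"))
        var err error

        for {
            // Pre-declare the per-iteration value and assign, so the outer err
            // is reused instead of being shadowed by `line, err := ...`.
            var line string
            line, err = r.ReadString('\n')
            if err == io.EOF {
                // there's nothing to read anymore
                break
            }

            fmt.Println("read:", strings.TrimSpace(line))
        }
    }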