Pull request: 2639 use testify require vol.1

Merge in DNS/adguard-home from 2639-testify-require-1 to master

Updates #2639.

Squashed commit of the following:

commit da7d283c6b20b4dbbc0af4689fa812d14f022b52
Merge: c4af71b0 63e4adc0
Author: Eugene Burkov <e.burkov@adguard.com>
Date:   Tue Feb 9 14:27:41 2021 +0300

    Merge branch 'master' into 2639-testify-require-1

commit c4af71b002dc68785106328f60946d7fa73fb933
Author: Eugene Burkov <e.burkov@adguard.com>
Date:   Mon Feb 8 19:32:51 2021 +0300

    querylog: fix tests for windows

commit b616ea5de88a38550ffd42253d3054ea6f90cff9
Author: Eugene Burkov <e.burkov@adguard.com>
Date:   Mon Feb 8 18:29:28 2021 +0300

    querylog: imp tests again

commit 091a698df5fbe6c3e572fde12da395f527c88b95
Author: Eugene Burkov <e.burkov@adguard.com>
Date:   Mon Feb 8 15:49:38 2021 +0300

    querylog: imp tests

3 changed files with 588 additions and 631 deletions
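
For context, issue #2639 tracks switching test preconditions from testify's assert package to its require package: assert records a failure and lets the test keep running, while require fails the test immediately, so later checks never operate on invalid state. A minimal, self-contained sketch of that distinction (the parseConfig helper below is hypothetical and not part of this repository):

    package example

    import (
        "encoding/json"
        "testing"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
    )

    // parseConfig is a hypothetical helper that gives the test something to check.
    func parseConfig(data []byte) (m map[string]int, err error) {
        err = json.Unmarshal(data, &m)

        return m, err
    }

    func TestParseConfig(t *testing.T) {
        m, err := parseConfig([]byte(`{"interval": 1, "memSize": 100}`))

        // require: a failure here aborts the test at once, so the assertions
        // below never run against a nil map or an unexpected error.
        require.Nil(t, err)
        require.Len(t, m, 2)

        // assert: non-fatal checks; every failing one is reported.
        assert.Equal(t, 1, m["interval"])
        assert.Equal(t, 100, m["memSize"])
    }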

View File

@@ -1,9 +1,12 @@
package querylog package querylog
import ( import (
"fmt"
"io/ioutil"
"math/rand" "math/rand"
"net" "net"
"os" "os"
"runtime"
"sort" "sort"
"testing" "testing"
"time" "time"
@@ -14,226 +17,292 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/dnsfilter" "github.com/AdguardTeam/AdGuardHome/internal/dnsfilter"
"github.com/miekg/dns" "github.com/miekg/dns"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
aghtest.DiscardLogOutput(m) aghtest.DiscardLogOutput(m)
} }
func prepareTestDir() string { func prepareTestDir(t *testing.T) string {
const dir = "./agh-test" t.Helper()
_ = os.RemoveAll(dir)
_ = os.MkdirAll(dir, 0o755) wd, err := os.Getwd()
require.Nil(t, err)
dir, err := ioutil.TempDir(wd, "agh-tests")
require.Nil(t, err)
require.NotEmpty(t, dir)
t.Cleanup(func() {
// TODO(e.burkov): Replace with t.TempDir methods after updating
// go version to 1.15.
start := time.Now()
for {
err := os.RemoveAll(dir)
if err == nil {
break
}
if runtime.GOOS != "windows" || time.Since(start) >= 500*time.Millisecond {
break
}
time.Sleep(5 * time.Millisecond)
}
assert.Nil(t, err)
})
return dir return dir
} }
// Check adding and loading (with filtering) entries from disk and memory // TestQueryLog tests adding and loading (with filtering) entries from disk and
// memory.
func TestQueryLog(t *testing.T) { func TestQueryLog(t *testing.T) {
conf := Config{ l := newQueryLog(Config{
Enabled: true, Enabled: true,
FileEnabled: true, FileEnabled: true,
Interval: 1, Interval: 1,
MemSize: 100, MemSize: 100,
} BaseDir: prepareTestDir(t),
conf.BaseDir = prepareTestDir() })
defer func() { _ = os.RemoveAll(conf.BaseDir) }()
l := newQueryLog(conf)
// add disk entries // Add disk entries.
addEntry(l, "example.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1)) addEntry(l, "example.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1))
// write to disk (first file) // Write to disk (first file).
_ = l.flushLogBuffer(true) require.Nil(t, l.flushLogBuffer(true))
// start writing to the second file // Start writing to the second file.
_ = l.rotate() require.Nil(t, l.rotate())
// add disk entries // Add disk entries.
addEntry(l, "example.org", net.IPv4(1, 1, 1, 2), net.IPv4(2, 2, 2, 2)) addEntry(l, "example.org", net.IPv4(1, 1, 1, 2), net.IPv4(2, 2, 2, 2))
// write to disk // Write to disk.
_ = l.flushLogBuffer(true) require.Nil(t, l.flushLogBuffer(true))
// add memory entries // Add memory entries.
addEntry(l, "test.example.org", net.IPv4(1, 1, 1, 3), net.IPv4(2, 2, 2, 3)) addEntry(l, "test.example.org", net.IPv4(1, 1, 1, 3), net.IPv4(2, 2, 2, 3))
addEntry(l, "example.com", net.IPv4(1, 1, 1, 4), net.IPv4(2, 2, 2, 4)) addEntry(l, "example.com", net.IPv4(1, 1, 1, 4), net.IPv4(2, 2, 2, 4))
// get all entries type tcAssertion struct {
params := newSearchParams() num int
entries, _ := l.search(params) host string
assert.Len(t, entries, 4) answer, client net.IP
assertLogEntry(t, entries[0], "example.com", net.IPv4(1, 1, 1, 4), net.IPv4(2, 2, 2, 4)) }
assertLogEntry(t, entries[1], "test.example.org", net.IPv4(1, 1, 1, 3), net.IPv4(2, 2, 2, 3))
assertLogEntry(t, entries[2], "example.org", net.IPv4(1, 1, 1, 2), net.IPv4(2, 2, 2, 2))
assertLogEntry(t, entries[3], "example.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1))
// search by domain (strict) testCases := []struct {
params = newSearchParams() name string
params.searchCriteria = append(params.searchCriteria, searchCriteria{ sCr []searchCriteria
want []tcAssertion
}{{
name: "all",
sCr: []searchCriteria{},
want: []tcAssertion{
{num: 0, host: "example.com", answer: net.IPv4(1, 1, 1, 4), client: net.IPv4(2, 2, 2, 4)},
{num: 1, host: "test.example.org", answer: net.IPv4(1, 1, 1, 3), client: net.IPv4(2, 2, 2, 3)},
{num: 2, host: "example.org", answer: net.IPv4(1, 1, 1, 2), client: net.IPv4(2, 2, 2, 2)},
{num: 3, host: "example.org", answer: net.IPv4(1, 1, 1, 1), client: net.IPv4(2, 2, 2, 1)},
},
}, {
name: "by_domain_strict",
sCr: []searchCriteria{{
criteriaType: ctDomainOrClient, criteriaType: ctDomainOrClient,
strict: true, strict: true,
value: "TEST.example.org", value: "TEST.example.org",
}) }},
entries, _ = l.search(params) want: []tcAssertion{{
assert.Len(t, entries, 1) num: 0, host: "test.example.org", answer: net.IPv4(1, 1, 1, 3), client: net.IPv4(2, 2, 2, 3),
assertLogEntry(t, entries[0], "test.example.org", net.IPv4(1, 1, 1, 3), net.IPv4(2, 2, 2, 3)) }},
}, {
// search by domain (not strict) name: "by_domain_non-strict",
params = newSearchParams() sCr: []searchCriteria{{
params.searchCriteria = append(params.searchCriteria, searchCriteria{
criteriaType: ctDomainOrClient, criteriaType: ctDomainOrClient,
strict: false, strict: false,
value: "example.ORG", value: "example.ORG",
}) }},
entries, _ = l.search(params) want: []tcAssertion{
assert.Len(t, entries, 3) {num: 0, host: "test.example.org", answer: net.IPv4(1, 1, 1, 3), client: net.IPv4(2, 2, 2, 3)},
assertLogEntry(t, entries[0], "test.example.org", net.IPv4(1, 1, 1, 3), net.IPv4(2, 2, 2, 3)) {num: 1, host: "example.org", answer: net.IPv4(1, 1, 1, 2), client: net.IPv4(2, 2, 2, 2)},
assertLogEntry(t, entries[1], "example.org", net.IPv4(1, 1, 1, 2), net.IPv4(2, 2, 2, 2)) {num: 2, host: "example.org", answer: net.IPv4(1, 1, 1, 1), client: net.IPv4(2, 2, 2, 1)},
assertLogEntry(t, entries[2], "example.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1)) },
}, {
// search by client IP (strict) name: "by_client_ip_strict",
params = newSearchParams() sCr: []searchCriteria{{
params.searchCriteria = append(params.searchCriteria, searchCriteria{
criteriaType: ctDomainOrClient, criteriaType: ctDomainOrClient,
strict: true, strict: true,
value: "2.2.2.2", value: "2.2.2.2",
}) }},
entries, _ = l.search(params) want: []tcAssertion{{
assert.Len(t, entries, 1) num: 0, host: "example.org", answer: net.IPv4(1, 1, 1, 2), client: net.IPv4(2, 2, 2, 2),
assertLogEntry(t, entries[0], "example.org", net.IPv4(1, 1, 1, 2), net.IPv4(2, 2, 2, 2)) }},
}, {
// search by client IP (part of) name: "by_client_ip_non-strict",
params = newSearchParams() sCr: []searchCriteria{{
params.searchCriteria = append(params.searchCriteria, searchCriteria{
criteriaType: ctDomainOrClient, criteriaType: ctDomainOrClient,
strict: false, strict: false,
value: "2.2.2", value: "2.2.2",
}},
want: []tcAssertion{
{num: 0, host: "example.com", answer: net.IPv4(1, 1, 1, 4), client: net.IPv4(2, 2, 2, 4)},
{num: 1, host: "test.example.org", answer: net.IPv4(1, 1, 1, 3), client: net.IPv4(2, 2, 2, 3)},
{num: 2, host: "example.org", answer: net.IPv4(1, 1, 1, 2), client: net.IPv4(2, 2, 2, 2)},
{num: 3, host: "example.org", answer: net.IPv4(1, 1, 1, 1), client: net.IPv4(2, 2, 2, 1)},
},
}}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
params := newSearchParams()
params.searchCriteria = tc.sCr
entries, _ := l.search(params)
require.Len(t, entries, len(tc.want))
for _, want := range tc.want {
assertLogEntry(t, entries[want.num], want.host, want.answer, want.client)
}
}) })
entries, _ = l.search(params) }
assert.Len(t, entries, 4)
assertLogEntry(t, entries[0], "example.com", net.IPv4(1, 1, 1, 4), net.IPv4(2, 2, 2, 4))
assertLogEntry(t, entries[1], "test.example.org", net.IPv4(1, 1, 1, 3), net.IPv4(2, 2, 2, 3))
assertLogEntry(t, entries[2], "example.org", net.IPv4(1, 1, 1, 2), net.IPv4(2, 2, 2, 2))
assertLogEntry(t, entries[3], "example.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1))
} }
func TestQueryLogOffsetLimit(t *testing.T) { func TestQueryLogOffsetLimit(t *testing.T) {
conf := Config{ l := newQueryLog(Config{
Enabled: true, Enabled: true,
Interval: 1, Interval: 1,
MemSize: 100, MemSize: 100,
} BaseDir: prepareTestDir(t),
conf.BaseDir = prepareTestDir() })
defer func() { _ = os.RemoveAll(conf.BaseDir) }()
l := newQueryLog(conf)
// add 10 entries to the log const (
for i := 0; i < 10; i++ { entNum = 10
addEntry(l, "second.example.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1)) firstPageDomain = "first.example.org"
secondPageDomain = "second.example.org"
)
// Add entries to the log.
for i := 0; i < entNum; i++ {
addEntry(l, secondPageDomain, net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1))
} }
// write them to disk (first file) // Write them to the first file.
_ = l.flushLogBuffer(true) require.Nil(t, l.flushLogBuffer(true))
// add 10 more entries to the log (memory) // Add more to the in-memory part of log.
for i := 0; i < 10; i++ { for i := 0; i < entNum; i++ {
addEntry(l, "first.example.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1)) addEntry(l, firstPageDomain, net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1))
} }
// First page
params := newSearchParams() params := newSearchParams()
params.offset = 0
params.limit = 10 testCases := []struct {
name string
offset int
limit int
wantLen int
want string
}{{
name: "page_1",
offset: 0,
limit: 10,
wantLen: 10,
want: firstPageDomain,
}, {
name: "page_2",
offset: 10,
limit: 10,
wantLen: 10,
want: secondPageDomain,
}, {
name: "page_2.5",
offset: 15,
limit: 10,
wantLen: 5,
want: secondPageDomain,
}, {
name: "page_3",
offset: 20,
limit: 10,
wantLen: 0,
}}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
params.offset = tc.offset
params.limit = tc.limit
entries, _ := l.search(params) entries, _ := l.search(params)
assert.Len(t, entries, 10)
assert.Equal(t, entries[0].QHost, "first.example.org")
assert.Equal(t, entries[9].QHost, "first.example.org")
// Second page require.Len(t, entries, tc.wantLen)
params.offset = 10
params.limit = 10
entries, _ = l.search(params)
assert.Len(t, entries, 10)
assert.Equal(t, entries[0].QHost, "second.example.org")
assert.Equal(t, entries[9].QHost, "second.example.org")
// Second and a half page if tc.wantLen > 0 {
params.offset = 15 assert.Equal(t, entries[0].QHost, tc.want)
params.limit = 10 assert.Equal(t, entries[tc.wantLen-1].QHost, tc.want)
entries, _ = l.search(params) }
assert.Len(t, entries, 5) })
assert.Equal(t, entries[0].QHost, "second.example.org") }
assert.Equal(t, entries[4].QHost, "second.example.org")
// Third page
params.offset = 20
params.limit = 10
entries, _ = l.search(params)
assert.Empty(t, entries)
} }
func TestQueryLogMaxFileScanEntries(t *testing.T) { func TestQueryLogMaxFileScanEntries(t *testing.T) {
conf := Config{ l := newQueryLog(Config{
Enabled: true, Enabled: true,
FileEnabled: true, FileEnabled: true,
Interval: 1, Interval: 1,
MemSize: 100, MemSize: 100,
} BaseDir: prepareTestDir(t),
conf.BaseDir = prepareTestDir() })
defer func() { _ = os.RemoveAll(conf.BaseDir) }()
l := newQueryLog(conf)
// add 10 entries to the log const entNum = 10
for i := 0; i < 10; i++ { // Add entries to the log.
for i := 0; i < entNum; i++ {
addEntry(l, "example.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1)) addEntry(l, "example.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1))
} }
// write them to disk (first file) // Write them to disk.
_ = l.flushLogBuffer(true) require.Nil(t, l.flushLogBuffer(true))
params := newSearchParams() params := newSearchParams()
params.maxFileScanEntries = 5 // do not scan more than 5 records
entries, _ := l.search(params)
assert.Len(t, entries, 5)
params.maxFileScanEntries = 0 // disable the limit for _, maxFileScanEntries := range []int{5, 0} {
entries, _ = l.search(params) t.Run(fmt.Sprintf("limit_%d", maxFileScanEntries), func(t *testing.T) {
assert.Len(t, entries, 10) params.maxFileScanEntries = maxFileScanEntries
entries, _ := l.search(params)
assert.Len(t, entries, entNum-maxFileScanEntries)
})
}
} }
func TestQueryLogFileDisabled(t *testing.T) { func TestQueryLogFileDisabled(t *testing.T) {
conf := Config{ l := newQueryLog(Config{
Enabled: true, Enabled: true,
FileEnabled: false, FileEnabled: false,
Interval: 1, Interval: 1,
MemSize: 2, MemSize: 2,
} BaseDir: prepareTestDir(t),
conf.BaseDir = prepareTestDir() })
defer func() { _ = os.RemoveAll(conf.BaseDir) }()
l := newQueryLog(conf)
addEntry(l, "example1.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1)) addEntry(l, "example1.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1))
addEntry(l, "example2.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1)) addEntry(l, "example2.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1))
// The oldest entry is going to be removed from memory buffer.
addEntry(l, "example3.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1)) addEntry(l, "example3.org", net.IPv4(1, 1, 1, 1), net.IPv4(2, 2, 2, 1))
// the oldest entry is now removed from mem buffer
params := newSearchParams() params := newSearchParams()
ll, _ := l.search(params) ll, _ := l.search(params)
assert.Len(t, ll, 2) require.Len(t, ll, 2)
assert.Equal(t, "example3.org", ll[0].QHost) assert.Equal(t, "example3.org", ll[0].QHost)
assert.Equal(t, "example2.org", ll[1].QHost) assert.Equal(t, "example2.org", ll[1].QHost)
} }
func addEntry(l *queryLog, host string, answerStr, client net.IP) { func addEntry(l *queryLog, host string, answerStr, client net.IP) {
q := dns.Msg{} q := dns.Msg{
q.Question = append(q.Question, dns.Question{ Question: []dns.Question{{
Name: host + ".", Name: host + ".",
Qtype: dns.TypeA, Qtype: dns.TypeA,
Qclass: dns.ClassINET, Qclass: dns.ClassINET,
}) }},
}
a := dns.Msg{} a := dns.Msg{
a.Question = append(a.Question, q.Question[0]) Question: q.Question,
answer := new(dns.A) Answer: []dns.RR{&dns.A{
answer.Hdr = dns.RR_Header{ Hdr: dns.RR_Header{
Name: q.Question[0].Name, Name: q.Question[0].Name,
Rrtype: dns.TypeA, Rrtype: dns.TypeA,
Class: dns.ClassINET, Class: dns.ClassINET,
},
A: answerStr,
}},
} }
answer.A = answerStr
a.Answer = append(a.Answer, answer)
res := dnsfilter.Result{ res := dnsfilter.Result{
IsFiltered: true, IsFiltered: true,
Reason: dnsfilter.Rewritten, Reason: dnsfilter.Rewritten,
@@ -254,19 +323,22 @@ func addEntry(l *queryLog, host string, answerStr, client net.IP) {
l.Add(params) l.Add(params)
} }
func assertLogEntry(t *testing.T, entry *logEntry, host string, answer, client net.IP) bool { func assertLogEntry(t *testing.T, entry *logEntry, host string, answer, client net.IP) {
t.Helper()
require.NotNil(t, entry)
assert.Equal(t, host, entry.QHost) assert.Equal(t, host, entry.QHost)
assert.Equal(t, client, entry.IP) assert.Equal(t, client, entry.IP)
assert.Equal(t, "A", entry.QType) assert.Equal(t, "A", entry.QType)
assert.Equal(t, "IN", entry.QClass) assert.Equal(t, "IN", entry.QClass)
msg := new(dns.Msg) msg := &dns.Msg{}
assert.Nil(t, msg.Unpack(entry.Answer)) require.Nil(t, msg.Unpack(entry.Answer))
assert.Len(t, msg.Answer, 1) require.Len(t, msg.Answer, 1)
ip := proxyutil.GetIPFromDNSRecord(msg.Answer[0]).To16() ip := proxyutil.GetIPFromDNSRecord(msg.Answer[0]).To16()
assert.NotNil(t, ip)
assert.Equal(t, answer, ip) assert.Equal(t, answer, ip)
return true
} }
func testEntries() (entries []*logEntry) { func testEntries() (entries []*logEntry) {
@@ -332,8 +404,8 @@ func TestLogEntriesByTime_sort(t *testing.T) {
entries := testEntries() entries := testEntries()
sort.Sort(logEntriesByTimeDesc(entries)) sort.Sort(logEntriesByTimeDesc(entries))
for i := 1; i < len(entries); i++ { for i := range entries[1:] {
assert.False(t, entries[i].Time.After(entries[i-1].Time), assert.False(t, entries[i+1].Time.After(entries[i].Time),
"%s %s", entries[i].Time, entries[i-1].Time) "%s %s", entries[i+1].Time, entries[i].Time)
} }
} }
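
The test file above replaces the fixed "./agh-test" directory with a per-test temporary directory whose removal is registered via t.Cleanup and retried on Windows, where the just-written query log file may still be held open for a moment. A condensed, standalone sketch of that pattern (illustrative, not a verbatim copy of the committed helper):

    package example

    import (
        "io/ioutil"
        "os"
        "runtime"
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
    )

    // tempDir creates a temporary directory under the working directory and
    // schedules its removal with t.Cleanup.  On Windows the removal is
    // retried for up to half a second.
    func tempDir(t *testing.T) (dir string) {
        t.Helper()

        wd, err := os.Getwd()
        require.Nil(t, err)

        // TODO: use t.TempDir once the minimum Go version is 1.15 or later.
        dir, err = ioutil.TempDir(wd, "agh-tests")
        require.Nil(t, err)

        t.Cleanup(func() {
            start := time.Now()

            var rerr error
            for {
                rerr = os.RemoveAll(dir)
                if rerr == nil {
                    break
                }

                if runtime.GOOS != "windows" || time.Since(start) >= 500*time.Millisecond {
                    break
                }

                time.Sleep(5 * time.Millisecond)
            }

            assert.Nil(t, rerr)
        })

        return dir
    }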

View File

@@ -2,347 +2,340 @@ package querylog
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math" "math"
"net" "net"
"os"
"strings" "strings"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestQLogFileEmpty(t *testing.T) { // prepareTestFiles prepares several test query log files, each with the
testDir := prepareTestDir() // specified lines count.
defer func() { _ = os.RemoveAll(testDir) }() func prepareTestFiles(t *testing.T, dir string, filesNum, linesNum int) []string {
testFile := prepareTestFile(testDir, 0) t.Helper()
// create the new QLogFile instance const strV = "\"%s\""
q, err := NewQLogFile(testFile) const nl = "\n"
assert.Nil(t, err) const format = `{"IP":` + strV + `,"T":` + strV + `,` +
assert.NotNil(t, q) `"QH":"example.org","QT":"A","QC":"IN",` +
defer q.Close() `"Answer":"AAAAAAABAAEAAAAAB2V4YW1wbGUDb3JnAAABAAEHZXhhbXBsZQNvcmcAAAEAAQAAAAAABAECAwQ=",` +
`"Result":{},"Elapsed":0,"Upstream":"upstream"}` + nl
// seek to the start
pos, err := q.SeekStart()
assert.Nil(t, err)
assert.EqualValues(t, 0, pos)
// try reading anyway
line, err := q.ReadNext()
assert.Equal(t, io.EOF, err)
assert.Empty(t, line)
}
func TestQLogFileLarge(t *testing.T) {
// should be large enough
count := 50000
testDir := prepareTestDir()
defer func() { _ = os.RemoveAll(testDir) }()
testFile := prepareTestFile(testDir, count)
// create the new QLogFile instance
q, err := NewQLogFile(testFile)
assert.Nil(t, err)
assert.NotNil(t, q)
defer q.Close()
// seek to the start
pos, err := q.SeekStart()
assert.Nil(t, err)
assert.NotEqualValues(t, 0, pos)
read := 0
var line string
for err == nil {
line, err = q.ReadNext()
if err == nil {
assert.NotZero(t, len(line))
read++
}
}
assert.Equal(t, count, read)
assert.Equal(t, io.EOF, err)
}
func TestQLogFileSeekLargeFile(t *testing.T) {
// more or less big file
count := 10000
testDir := prepareTestDir()
defer func() { _ = os.RemoveAll(testDir) }()
testFile := prepareTestFile(testDir, count)
// create the new QLogFile instance
q, err := NewQLogFile(testFile)
assert.Nil(t, err)
assert.NotNil(t, q)
defer q.Close()
// CASE 1: NOT TOO OLD LINE
testSeekLineQLogFile(t, q, 300)
// CASE 2: OLD LINE
testSeekLineQLogFile(t, q, count-300)
// CASE 3: FIRST LINE
testSeekLineQLogFile(t, q, 0)
// CASE 4: LAST LINE
testSeekLineQLogFile(t, q, count)
// CASE 5: Seek non-existent (too low)
_, _, err = q.SeekTS(123)
assert.NotNil(t, err)
// CASE 6: Seek non-existent (too high)
ts, _ := time.Parse(time.RFC3339, "2100-01-02T15:04:05Z07:00")
_, _, err = q.SeekTS(ts.UnixNano())
assert.NotNil(t, err)
// CASE 7: "Almost" found
line, err := getQLogFileLine(q, count/2)
assert.Nil(t, err)
// ALMOST the record we need
timestamp := readQLogTimestamp(line) - 1
assert.NotEqualValues(t, 0, timestamp)
_, depth, err := q.SeekTS(timestamp)
assert.NotNil(t, err)
assert.LessOrEqual(t, depth, int(math.Log2(float64(count))+3))
}
func TestQLogFileSeekSmallFile(t *testing.T) {
// more or less big file
count := 10
testDir := prepareTestDir()
defer func() { _ = os.RemoveAll(testDir) }()
testFile := prepareTestFile(testDir, count)
// create the new QLogFile instance
q, err := NewQLogFile(testFile)
assert.Nil(t, err)
assert.NotNil(t, q)
defer q.Close()
// CASE 1: NOT TOO OLD LINE
testSeekLineQLogFile(t, q, 2)
// CASE 2: OLD LINE
testSeekLineQLogFile(t, q, count-2)
// CASE 3: FIRST LINE
testSeekLineQLogFile(t, q, 0)
// CASE 4: LAST LINE
testSeekLineQLogFile(t, q, count)
// CASE 5: Seek non-existent (too low)
_, _, err = q.SeekTS(123)
assert.NotNil(t, err)
// CASE 6: Seek non-existent (too high)
ts, _ := time.Parse(time.RFC3339, "2100-01-02T15:04:05Z07:00")
_, _, err = q.SeekTS(ts.UnixNano())
assert.NotNil(t, err)
// CASE 7: "Almost" found
line, err := getQLogFileLine(q, count/2)
assert.Nil(t, err)
// ALMOST the record we need
timestamp := readQLogTimestamp(line) - 1
assert.NotEqualValues(t, 0, timestamp)
_, depth, err := q.SeekTS(timestamp)
assert.NotNil(t, err)
assert.LessOrEqual(t, depth, int(math.Log2(float64(count))+3))
}
func testSeekLineQLogFile(t *testing.T, q *QLogFile, lineNumber int) {
line, err := getQLogFileLine(q, lineNumber)
assert.Nil(t, err)
ts := readQLogTimestamp(line)
assert.NotEqualValues(t, 0, ts)
// try seeking to that line now
pos, _, err := q.SeekTS(ts)
assert.Nil(t, err)
assert.NotEqualValues(t, 0, pos)
testLine, err := q.ReadNext()
assert.Nil(t, err)
assert.Equal(t, line, testLine)
}
func getQLogFileLine(q *QLogFile, lineNumber int) (string, error) {
_, err := q.SeekStart()
if err != nil {
return "", err
}
for i := 1; i < lineNumber; i++ {
_, err := q.ReadNext()
if err != nil {
return "", err
}
}
return q.ReadNext()
}
// Check adding and loading (with filtering) entries from disk and memory
func TestQLogFile(t *testing.T) {
testDir := prepareTestDir()
defer func() { _ = os.RemoveAll(testDir) }()
testFile := prepareTestFile(testDir, 2)
// create the new QLogFile instance
q, err := NewQLogFile(testFile)
assert.Nil(t, err)
assert.NotNil(t, q)
defer q.Close()
// seek to the start
pos, err := q.SeekStart()
assert.Nil(t, err)
assert.Greater(t, pos, int64(0))
// read first line
line, err := q.ReadNext()
assert.Nil(t, err)
assert.Contains(t, line, "0.0.0.2")
assert.True(t, strings.HasPrefix(line, "{"), line)
assert.True(t, strings.HasSuffix(line, "}"), line)
// read second line
line, err = q.ReadNext()
assert.Nil(t, err)
assert.EqualValues(t, 0, q.position)
assert.Contains(t, line, "0.0.0.1")
assert.True(t, strings.HasPrefix(line, "{"), line)
assert.True(t, strings.HasSuffix(line, "}"), line)
// try reading again (there's nothing to read anymore)
line, err = q.ReadNext()
assert.Equal(t, io.EOF, err)
assert.Empty(t, line)
}
// prepareTestFile - prepares a test query log file with the specified number of lines
func prepareTestFile(dir string, linesCount int) string {
return prepareTestFiles(dir, 1, linesCount)[0]
}
// prepareTestFiles - prepares several test query log files
// each of them -- with the specified linesCount
func prepareTestFiles(dir string, filesCount, linesCount int) []string {
format := `{"IP":"${IP}","T":"${TIMESTAMP}","QH":"example.org","QT":"A","QC":"IN","Answer":"AAAAAAABAAEAAAAAB2V4YW1wbGUDb3JnAAABAAEHZXhhbXBsZQNvcmcAAAEAAQAAAAAABAECAwQ=","Result":{},"Elapsed":0,"Upstream":"upstream"}`
lineTime, _ := time.Parse(time.RFC3339Nano, "2020-02-18T22:36:35.920973+03:00") lineTime, _ := time.Parse(time.RFC3339Nano, "2020-02-18T22:36:35.920973+03:00")
lineIP := uint32(0) lineIP := uint32(0)
files := make([]string, filesCount) files := make([]string, filesNum)
for j := 0; j < filesCount; j++ { for j := range files {
f, _ := ioutil.TempFile(dir, "*.txt") f, err := ioutil.TempFile(dir, "*.txt")
files[filesCount-j-1] = f.Name() require.Nil(t, err)
files[filesNum-j-1] = f.Name()
for i := 0; i < linesCount; i++ { for i := 0; i < linesNum; i++ {
lineIP++ lineIP++
lineTime = lineTime.Add(time.Second) lineTime = lineTime.Add(time.Second)
ip := make(net.IP, 4) ip := make(net.IP, 4)
binary.BigEndian.PutUint32(ip, lineIP) binary.BigEndian.PutUint32(ip, lineIP)
line := format line := fmt.Sprintf(format, ip, lineTime.Format(time.RFC3339Nano))
line = strings.ReplaceAll(line, "${IP}", ip.String())
line = strings.ReplaceAll(line, "${TIMESTAMP}", lineTime.Format(time.RFC3339Nano))
_, _ = f.WriteString(line) _, err = f.WriteString(line)
_, _ = f.WriteString("\n") require.Nil(t, err)
} }
} }
return files return files
} }
func TestQLogSeek(t *testing.T) { // prepareTestFile prepares a test query log file with the specified number of
testDir := prepareTestDir() // lines.
defer func() { _ = os.RemoveAll(testDir) }() func prepareTestFile(t *testing.T, dir string, linesCount int) string {
t.Helper()
d := `{"T":"2020-08-31T18:44:23.911246629+03:00","QH":"wfqvjymurpwegyv","QT":"A","QC":"IN","CP":"","Answer":"","Result":{},"Elapsed":66286385,"Upstream":"tls://dns-unfiltered.adguard.com:853"} return prepareTestFiles(t, dir, 1, linesCount)[0]
{"T":"2020-08-31T18:44:25.376690873+03:00"}
{"T":"2020-08-31T18:44:25.382540454+03:00"}`
f, _ := ioutil.TempFile(testDir, "*.txt")
_, _ = f.WriteString(d)
defer f.Close()
q, err := NewQLogFile(f.Name())
assert.Nil(t, err)
defer q.Close()
target, _ := time.Parse(time.RFC3339, "2020-08-31T18:44:25.376690873+03:00")
_, depth, err := q.SeekTS(target.UnixNano())
assert.Nil(t, err)
assert.Equal(t, 1, depth)
} }
func TestQLogSeek_ErrTSTooLate(t *testing.T) { // newTestQLogFile creates new *QLogFile for tests and registers the required
testDir := prepareTestDir() // cleanup functions.
func newTestQLogFile(t *testing.T, linesNum int) (file *QLogFile) {
t.Helper()
testFile := prepareTestFile(t, prepareTestDir(t), linesNum)
// Create the new QLogFile instance.
file, err := NewQLogFile(testFile)
require.Nil(t, err)
assert.NotNil(t, file)
t.Cleanup(func() { t.Cleanup(func() {
_ = os.RemoveAll(testDir) assert.Nil(t, file.Close())
}) })
d := `{"T":"2020-08-31T18:44:23.911246629+03:00","QH":"wfqvjymurpwegyv","QT":"A","QC":"IN","CP":"","Answer":"","Result":{},"Elapsed":66286385,"Upstream":"tls://dns-unfiltered.adguard.com:853"} return file
{"T":"2020-08-31T18:44:25.376690873+03:00"}
{"T":"2020-08-31T18:44:25.382540454+03:00"}
`
f, err := ioutil.TempFile(testDir, "*.txt")
assert.Nil(t, err)
defer f.Close()
_, err = f.WriteString(d)
assert.Nil(t, err)
q, err := NewQLogFile(f.Name())
assert.Nil(t, err)
defer q.Close()
target, err := time.Parse(time.RFC3339, "2020-08-31T18:44:25.382540454+03:00")
assert.Nil(t, err)
_, depth, err := q.SeekTS(target.UnixNano() + int64(time.Second))
assert.Equal(t, ErrTSTooLate, err)
assert.Equal(t, 2, depth)
} }
func TestQLogSeek_ErrTSTooEarly(t *testing.T) { func TestQLogFile_ReadNext(t *testing.T) {
testDir := prepareTestDir() testCases := []struct {
name string
linesNum int
}{{
name: "empty",
linesNum: 0,
}, {
name: "large",
linesNum: 50000,
}}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
q := newTestQLogFile(t, tc.linesNum)
// Calculate the expected position.
fileInfo, err := q.file.Stat()
require.Nil(t, err)
var expPos int64
if expPos = fileInfo.Size(); expPos > 0 {
expPos--
}
// Seek to the start.
pos, err := q.SeekStart()
require.Nil(t, err)
require.EqualValues(t, expPos, pos)
var read int
var line string
for err == nil {
line, err = q.ReadNext()
if err == nil {
assert.NotEmpty(t, line)
read++
}
}
require.Equal(t, io.EOF, err)
assert.Equal(t, tc.linesNum, read)
})
}
}
func TestQLogFile_SeekTS_good(t *testing.T) {
linesCases := []struct {
name string
num int
}{{
name: "large",
num: 10000,
}, {
name: "small",
num: 10,
}}
for _, l := range linesCases {
testCases := []struct {
name string
linesNum int
line int
}{{
name: "not_too_old",
line: 2,
}, {
name: "old",
line: l.num - 2,
}, {
name: "first",
line: 0,
}, {
name: "last",
line: l.num,
}}
q := newTestQLogFile(t, l.num)
for _, tc := range testCases {
t.Run(l.name+"_"+tc.name, func(t *testing.T) {
line, err := getQLogFileLine(q, tc.line)
require.Nil(t, err)
ts := readQLogTimestamp(line)
assert.NotEqualValues(t, 0, ts)
// Try seeking to that line now.
pos, _, err := q.SeekTS(ts)
require.Nil(t, err)
assert.NotEqualValues(t, 0, pos)
testLine, err := q.ReadNext()
require.Nil(t, err)
assert.Equal(t, line, testLine)
})
}
}
}
func TestQLogFile_SeekTS_bad(t *testing.T) {
linesCases := []struct {
name string
num int
}{{
name: "large",
num: 10000,
}, {
name: "small",
num: 10,
}}
for _, l := range linesCases {
testCases := []struct {
name string
ts int64
leq bool
}{{
name: "non-existent_long_ago",
}, {
name: "non-existent_far_ahead",
}, {
name: "almost",
leq: true,
}}
q := newTestQLogFile(t, l.num)
testCases[0].ts = 123
lateTS, _ := time.Parse(time.RFC3339, "2100-01-02T15:04:05Z07:00")
testCases[1].ts = lateTS.UnixNano()
line, err := getQLogFileLine(q, l.num/2)
require.Nil(t, err)
testCases[2].ts = readQLogTimestamp(line) - 1
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
assert.NotEqualValues(t, 0, tc.ts)
_, depth, err := q.SeekTS(tc.ts)
assert.NotEmpty(t, l.num)
require.NotNil(t, err)
if tc.leq {
assert.LessOrEqual(t, depth, int(math.Log2(float64(l.num))+3))
}
})
}
}
}
func getQLogFileLine(q *QLogFile, lineNumber int) (line string, err error) {
if _, err = q.SeekStart(); err != nil {
return line, err
}
for i := 1; i < lineNumber; i++ {
if _, err = q.ReadNext(); err != nil {
return line, err
}
}
return q.ReadNext()
}
// Check adding and loading (with filtering) entries from disk and memory.
func TestQLogFile(t *testing.T) {
// Create the new QLogFile instance.
q := newTestQLogFile(t, 2)
// Seek to the start.
pos, err := q.SeekStart()
require.Nil(t, err)
assert.Greater(t, pos, int64(0))
// Read first line.
line, err := q.ReadNext()
require.Nil(t, err)
assert.Contains(t, line, "0.0.0.2")
assert.True(t, strings.HasPrefix(line, "{"), line)
assert.True(t, strings.HasSuffix(line, "}"), line)
// Read second line.
line, err = q.ReadNext()
require.Nil(t, err)
assert.EqualValues(t, 0, q.position)
assert.Contains(t, line, "0.0.0.1")
assert.True(t, strings.HasPrefix(line, "{"), line)
assert.True(t, strings.HasSuffix(line, "}"), line)
// Try reading again (there's nothing to read anymore).
line, err = q.ReadNext()
require.Equal(t, io.EOF, err)
assert.Empty(t, line)
}
func NewTestQLogFileData(t *testing.T, data string) (file *QLogFile) {
f, err := ioutil.TempFile(prepareTestDir(t), "*.txt")
require.Nil(t, err)
t.Cleanup(func() { t.Cleanup(func() {
_ = os.RemoveAll(testDir) assert.Nil(t, f.Close())
}) })
d := `{"T":"2020-08-31T18:44:23.911246629+03:00","QH":"wfqvjymurpwegyv","QT":"A","QC":"IN","CP":"","Answer":"","Result":{},"Elapsed":66286385,"Upstream":"tls://dns-unfiltered.adguard.com:853"} _, err = f.WriteString(data)
{"T":"2020-08-31T18:44:25.376690873+03:00"} require.Nil(t, err)
{"T":"2020-08-31T18:44:25.382540454+03:00"}
`
f, err := ioutil.TempFile(testDir, "*.txt")
assert.Nil(t, err)
defer f.Close()
_, err = f.WriteString(d) file, err = NewQLogFile(f.Name())
assert.Nil(t, err) require.Nil(t, err)
t.Cleanup(func() {
assert.Nil(t, file.Close())
})
q, err := NewQLogFile(f.Name()) return file
assert.Nil(t, err) }
defer q.Close()
func TestQLog_Seek(t *testing.T) {
target, err := time.Parse(time.RFC3339, "2020-08-31T18:44:23.911246629+03:00") const nl = "\n"
assert.Nil(t, err) const strV = "%s"
const recs = `{"T":"` + strV + `","QH":"wfqvjymurpwegyv","QT":"A","QC":"IN","CP":"","Answer":"","Result":{},"Elapsed":66286385,"Upstream":"tls://dns-unfiltered.adguard.com:853"}` + nl +
_, depth, err := q.SeekTS(target.UnixNano() - int64(time.Second)) `{"T":"` + strV + `"}` + nl +
assert.Equal(t, ErrTSTooEarly, err) `{"T":"` + strV + `"}` + nl
assert.Equal(t, 1, depth) timestamp, _ := time.Parse(time.RFC3339Nano, "2020-08-31T18:44:25.376690873+03:00")
testCases := []struct {
name string
delta int
wantErr error
wantDepth int
}{{
name: "ok",
delta: 0,
wantErr: nil,
wantDepth: 2,
}, {
name: "too_late",
delta: 2,
wantErr: ErrTSTooLate,
wantDepth: 2,
}, {
name: "too_early",
delta: -2,
wantErr: ErrTSTooEarly,
wantDepth: 1,
}}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
data := fmt.Sprintf(recs,
timestamp.Add(-time.Second).Format(time.RFC3339Nano),
timestamp.Format(time.RFC3339Nano),
timestamp.Add(time.Second).Format(time.RFC3339Nano),
)
q := NewTestQLogFileData(t, data)
_, depth, err := q.SeekTS(timestamp.Add(time.Second * time.Duration(tc.delta)).UnixNano())
require.Truef(t, errors.Is(err, tc.wantErr), "%v", err)
assert.Equal(t, tc.wantDepth, depth)
})
}
} }
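
The rewritten tests in the file above consistently use a table-driven layout: cases are anonymous struct literals, each case runs under t.Run, preconditions use require, and the final comparisons use assert. A minimal sketch of that shape against a stand-in function, strconv.Atoi, purely for illustration:

    package example

    import (
        "strconv"
        "testing"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
    )

    func TestAtoi(t *testing.T) {
        testCases := []struct {
            name    string
            in      string
            want    int
            wantErr bool
        }{{
            name: "ok",
            in:   "42",
            want: 42,
        }, {
            name:    "bad",
            in:      "forty-two",
            wantErr: true,
        }}

        for _, tc := range testCases {
            t.Run(tc.name, func(t *testing.T) {
                got, err := strconv.Atoi(tc.in)
                if tc.wantErr {
                    require.NotNil(t, err)

                    return
                }

                require.Nil(t, err)
                assert.Equal(t, tc.want, got)
            })
        }
    }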

View File

@@ -3,110 +3,77 @@ package querylog
import ( import (
"errors" "errors"
"io" "io"
"os"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestQLogReaderEmpty(t *testing.T) { // newTestQLogReader creates new *QLogReader for tests and registers the
r, err := NewQLogReader([]string{}) // required cleanup functions.
assert.Nil(t, err) func newTestQLogReader(t *testing.T, filesNum, linesNum int) (reader *QLogReader) {
assert.NotNil(t, r) t.Helper()
defer r.Close()
// seek to the start testFiles := prepareTestFiles(t, prepareTestDir(t), filesNum, linesNum)
err = r.SeekStart()
assert.Nil(t, err)
line, err := r.ReadNext() // Create the new QLogReader instance.
assert.Empty(t, line) reader, err := NewQLogReader(testFiles)
assert.Equal(t, io.EOF, err) require.Nil(t, err)
assert.NotNil(t, reader)
t.Cleanup(func() {
assert.Nil(t, reader.Close())
})
return reader
} }
func TestQLogReaderOneFile(t *testing.T) { func TestQLogReader(t *testing.T) {
// let's do one small file testCases := []struct {
count := 10 name string
filesCount := 1 filesNum int
linesNum int
}{{
name: "empty",
filesNum: 0,
linesNum: 0,
}, {
name: "one_file",
filesNum: 1,
linesNum: 10,
}, {
name: "multiple_files",
filesNum: 5,
linesNum: 10000,
}}
testDir := prepareTestDir() for _, tc := range testCases {
defer func() { _ = os.RemoveAll(testDir) }() t.Run(tc.name, func(t *testing.T) {
testFiles := prepareTestFiles(testDir, filesCount, count) r := newTestQLogReader(t, tc.filesNum, tc.linesNum)
r, err := NewQLogReader(testFiles) // Seek to the start.
assert.Nil(t, err) err := r.SeekStart()
assert.NotNil(t, r) require.Nil(t, err)
defer r.Close()
// seek to the start // Read everything.
err = r.SeekStart() var read int
assert.Nil(t, err)
// read everything
read := 0
var line string var line string
for err == nil { for err == nil {
line, err = r.ReadNext() line, err = r.ReadNext()
if err == nil { if err == nil {
assert.True(t, len(line) > 0) assert.NotEmpty(t, line)
read++ read++
} }
} }
assert.Equal(t, count*filesCount, read) require.Equal(t, io.EOF, err)
assert.Equal(t, io.EOF, err) assert.Equal(t, tc.filesNum*tc.linesNum, read)
} })
func TestQLogReaderMultipleFiles(t *testing.T) {
// should be large enough
count := 10000
filesCount := 5
testDir := prepareTestDir()
defer func() { _ = os.RemoveAll(testDir) }()
testFiles := prepareTestFiles(testDir, filesCount, count)
r, err := NewQLogReader(testFiles)
assert.Nil(t, err)
assert.NotNil(t, r)
defer r.Close()
// seek to the start
err = r.SeekStart()
assert.Nil(t, err)
// read everything
read := 0
var line string
for err == nil {
line, err = r.ReadNext()
if err == nil {
assert.True(t, len(line) > 0)
read++
} }
}
assert.Equal(t, count*filesCount, read)
assert.Equal(t, io.EOF, err)
} }
func TestQLogReader_Seek(t *testing.T) { func TestQLogReader_Seek(t *testing.T) {
count := 10000 r := newTestQLogReader(t, 2, 10000)
filesCount := 2
testDir := prepareTestDir()
t.Cleanup(func() {
_ = os.RemoveAll(testDir)
})
testFiles := prepareTestFiles(testDir, filesCount, count)
r, err := NewQLogReader(testFiles)
assert.Nil(t, err)
assert.NotNil(t, r)
t.Cleanup(func() {
_ = r.Close()
})
testCases := []struct { testCases := []struct {
name string name string
@@ -114,7 +81,7 @@ func TestQLogReader_Seek(t *testing.T) {
want error want error
}{{ }{{
name: "not_too_old", name: "not_too_old",
time: "2020-02-19T04:04:56.920973+03:00", time: "2020-02-18T22:39:35.920973+03:00",
want: nil, want: nil,
}, { }, {
name: "old", name: "old",
@@ -122,7 +89,7 @@
want: nil, want: nil,
}, { }, {
name: "first", name: "first",
time: "2020-02-19T04:09:55.920973+03:00", time: "2020-02-18T22:36:36.920973+03:00",
want: nil, want: nil,
}, { }, {
name: "last", name: "last",
@@ -147,28 +114,20 @@ func TestQLogReader_Seek(t *testing.T) {
timestamp, err := time.Parse(time.RFC3339Nano, tc.time) timestamp, err := time.Parse(time.RFC3339Nano, tc.time)
assert.Nil(t, err) assert.Nil(t, err)
if tc.name == "first" {
assert.True(t, true)
}
err = r.SeekTS(timestamp.UnixNano()) err = r.SeekTS(timestamp.UnixNano())
assert.True(t, errors.Is(err, tc.want), err) assert.True(t, errors.Is(err, tc.want))
}) })
} }
} }
func TestQLogReader_ReadNext(t *testing.T) { func TestQLogReader_ReadNext(t *testing.T) {
count := 10 const linesNum = 10
filesCount := 1 const filesNum = 1
r := newTestQLogReader(t, filesNum, linesNum)
testDir := prepareTestDir()
t.Cleanup(func() {
_ = os.RemoveAll(testDir)
})
testFiles := prepareTestFiles(testDir, filesCount, count)
r, err := NewQLogReader(testFiles)
assert.Nil(t, err)
assert.NotNil(t, r)
t.Cleanup(func() {
_ = r.Close()
})
testCases := []struct { testCases := []struct {
name string name string
@@ -180,7 +139,7 @@ func TestQLogReader_ReadNext(t *testing.T) {
want: nil, want: nil,
}, { }, {
name: "too_big", name: "too_big",
start: count + 1, start: linesNum + 1,
want: io.EOF, want: io.EOF,
}} }}
@@ -199,70 +158,3 @@ func TestQLogReader_ReadNext(t *testing.T) {
}) })
} }
} }
// TODO(e.burkov): Remove the tests below. Make tests above more compelling.
func TestQLogReaderSeek(t *testing.T) {
// more or less big file
count := 10000
filesCount := 2
testDir := prepareTestDir()
defer func() { _ = os.RemoveAll(testDir) }()
testFiles := prepareTestFiles(testDir, filesCount, count)
r, err := NewQLogReader(testFiles)
assert.Nil(t, err)
assert.NotNil(t, r)
defer r.Close()
// CASE 1: NOT TOO OLD LINE
testSeekLineQLogReader(t, r, 300)
// CASE 2: OLD LINE
testSeekLineQLogReader(t, r, count-300)
// CASE 3: FIRST LINE
testSeekLineQLogReader(t, r, 0)
// CASE 4: LAST LINE
testSeekLineQLogReader(t, r, count)
// CASE 5: Seek non-existent (too low)
err = r.SeekTS(123)
assert.NotNil(t, err)
// CASE 6: Seek non-existent (too high)
ts, _ := time.Parse(time.RFC3339, "2100-01-02T15:04:05Z07:00")
err = r.SeekTS(ts.UnixNano())
assert.NotNil(t, err)
}
func testSeekLineQLogReader(t *testing.T, r *QLogReader, lineNumber int) {
line, err := getQLogReaderLine(r, lineNumber)
assert.Nil(t, err)
ts := readQLogTimestamp(line)
assert.NotEqualValues(t, 0, ts)
// try seeking to that line now
err = r.SeekTS(ts)
assert.Nil(t, err)
testLine, err := r.ReadNext()
assert.Nil(t, err)
assert.Equal(t, line, testLine)
}
func getQLogReaderLine(r *QLogReader, lineNumber int) (string, error) {
err := r.SeekStart()
if err != nil {
return "", err
}
for i := 1; i < lineNumber; i++ {
_, err := r.ReadNext()
if err != nil {
return "", err
}
}
return r.ReadNext()
}
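
Both newTestQLogFile and newTestQLogReader above follow the same constructor shape: take *testing.T, call t.Helper, fail fast with require, and register teardown with t.Cleanup so call sites need no defer and no manual Close. A generic sketch of that shape using a plain temporary file (the helper and test below are illustrative only):

    package example

    import (
        "io/ioutil"
        "os"
        "testing"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
    )

    // newTestFile creates the resource, fails the test immediately if that
    // does not work, and lets t.Cleanup close and remove it once the test and
    // all of its subtests have finished.
    func newTestFile(t *testing.T) (f *os.File) {
        t.Helper()

        f, err := ioutil.TempFile("", "*.txt")
        require.Nil(t, err)

        t.Cleanup(func() {
            assert.Nil(t, f.Close())
            assert.Nil(t, os.Remove(f.Name()))
        })

        return f
    }

    func TestWrite(t *testing.T) {
        f := newTestFile(t)

        _, err := f.WriteString("hello\n")
        require.Nil(t, err)
    }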