Do not store last_updated in the config file anymore

This commit is contained in:
Andrey Meshkov 2019-02-10 21:44:16 +03:00
parent 9a03190a62
commit 9ff420bb52
4 changed files with 40 additions and 10 deletions

1
app.go
View File

@ -106,6 +106,7 @@ func run(args options) {
log.Printf("Couldn't load filter %d contents due to %s", filter.ID, err)
// clear LastUpdated so it gets fetched right away
}
if len(filter.Rules) == 0 {
filter.LastUpdated = time.Time{}
}

View File

@ -115,7 +115,7 @@ func getLogSettings() logSettings {
// parseConfig loads configuration from the YAML file
func parseConfig() error {
configFile := config.getConfigFilename()
log.Tracef("Reading YAML file: %s", configFile)
log.Printf("Reading config file: %s", configFile)
yamlFile, err := readConfigFile()
if err != nil {
log.Printf("Couldn't read config file: %s", err)

View File

@ -39,6 +39,7 @@ type Server struct {
dnsFilter *dnsfilter.Dnsfilter // DNS filter instance
queryLog *queryLog // Query log instance
stats *stats // General server statistics
once sync.Once
sync.RWMutex
ServerConfig
@ -126,8 +127,9 @@ func (s *Server) startInternal(config *ServerConfig) error {
return errorx.Decorate(err, "failed to load stats from querylog")
}
// TODO: Start starts rotators, stop stops rotators
once.Do(func() {
// TODO: Think about reworking this, the current approach won't work properly if AG Home is restarted periodically
s.once.Do(func() {
log.Printf("Start DNS server periodic jobs")
go s.queryLog.periodicQueryLogRotate()
go s.queryLog.runningTop.periodicHourlyTopRotate()
go s.stats.statsRotator()
@ -436,5 +438,3 @@ func (s *Server) genSOA(request *dns.Msg) []dns.RR {
}
return []dns.RR{&soa}
}
var once sync.Once

View File

@ -26,7 +26,7 @@ type filter struct {
URL string `json:"url"`
Name string `json:"name" yaml:"name"`
RulesCount int `json:"rulesCount" yaml:"-"`
LastUpdated time.Time `json:"lastUpdated,omitempty" yaml:"last_updated,omitempty"`
LastUpdated time.Time `json:"lastUpdated,omitempty" yaml:"-"`
dnsfilter.Filter `yaml:",inline"`
}
@ -95,6 +95,15 @@ func refreshFiltersIfNecessary(force bool) int {
filter.ID = assignUniqueFilterID()
}
// Re-load it from the disk before updating
if len(filter.Rules) == 0 {
err := filter.load()
if err != nil {
log.Printf("Failed to reload filter %s: %s", filter.URL, err)
continue
}
}
updated, err := filter.update(force)
if err != nil {
log.Printf("Failed to update filter %s: %s\n", filter.URL, err)
@ -162,9 +171,6 @@ func (filter *filter) update(force bool) (bool, error) {
log.Printf("Downloading update for filter %d from %s", filter.ID, filter.URL)
// use the same update period for failed filter downloads to avoid flooding with requests
filter.LastUpdated = time.Now()
resp, err := client.Get(filter.URL)
if resp != nil && resp.Body != nil {
defer resp.Body.Close()
@ -217,7 +223,11 @@ func (filter *filter) save() error {
log.Printf("Saving filter %d contents to: %s", filter.ID, filterFilePath)
body := []byte(strings.Join(filter.Rules, "\n"))
return safeWriteFile(filterFilePath, body)
err := safeWriteFile(filterFilePath, body)
// update LastUpdated field after saving the file
filter.LastUpdated = filter.LastTimeUpdated()
return err
}
// loads filter contents from the file in dataDir
@ -245,6 +255,7 @@ func (filter *filter) load() error {
filter.RulesCount = rulesCount
filter.Rules = rules
filter.LastUpdated = filter.LastTimeUpdated()
return nil
}
@ -253,3 +264,21 @@ func (filter *filter) load() error {
// Path returns the path to the filter contents file inside dataDir,
// derived from the filter's numeric ID (e.g. "<workdir>/data/filters/1.txt").
func (filter *filter) Path() string {
	fileName := strconv.FormatInt(filter.ID, 10) + ".txt"
	return filepath.Join(config.ourWorkingDir, dataDir, filterDir, fileName)
}
// LastTimeUpdated returns the modification time of the filter's contents
// file, which is used as the "last updated" timestamp now that the value
// is no longer stored in the config file. If the file does not exist (or
// cannot be stat'ed), it returns the zero time (0001-01-01) so that the
// filter is treated as never updated and gets fetched right away.
func (filter *filter) LastTimeUpdated() time.Time {
	filterFilePath := filter.Path()
	// A single os.Stat both detects a missing file and yields ModTime;
	// the original code called os.Stat twice for the same path.
	s, err := os.Stat(filterFilePath)
	if err != nil {
		// missing file or any other stat error: report the zero time
		return time.Time{}
	}
	// filter file modified time
	return s.ModTime()
}