rename: oop log parser result -> record
@@ -8,7 +8,7 @@ package main
 // width, height int
 // }

-// func (s *chartReport) digest(results iterator) error {
+// func (s *chartReport) digest(records iterator) error {
 // w := os.Stdout

 // donut := chart.DonutChart{
@@ -22,7 +22,7 @@ package main
 // Height: s.height,
 // }

-// results.each(func(r result) {
+// records.each(func(r record) {
 // v := chart.Value{
 // Label: r.domain + r.page + ": " + strconv.Itoa(r.visits),
 // Value: float64(r.visits),

@@ -16,15 +16,15 @@ func filterBy(fn ...filterFunc) *filter {
 	return &filter{filters: fn}
 }

-// transform the result
-func (f *filter) digest(results iterator) error {
-	f.src = results
+// transform the record
+func (f *filter) digest(records iterator) error {
+	f.src = records
 	return nil
 }

-// each yields an analysis result
-func (f *filter) each(yield resultFn) error {
-	return f.src.each(func(r result) {
+// each yields only the filtered records
+func (f *filter) each(yield recordFn) error {
+	return f.src.each(func(r record) {
 		if !f.check(r) {
 			return
 		}
@@ -32,8 +32,8 @@ func (f *filter) each(yield resultFn) error {
 	})
 }

-// check all the filters against the result
-func (f *filter) check(r result) bool {
+// check all the filters against the record
+func (f *filter) check(r record) bool {
 	for _, fi := range f.filters {
 		if !fi(r) {
 			return false

@@ -2,20 +2,20 @@ package main

 import "strings"

-type filterFunc func(result) bool
+type filterFunc func(record) bool

-func noopFilter(r result) bool {
+func noopFilter(r record) bool {
 	return true
 }

 func notUsing(filter filterFunc) filterFunc {
-	return func(r result) bool {
+	return func(r record) bool {
 		return !filter(r)
 	}
 }

 func domainExtFilter(domains ...string) filterFunc {
-	return func(r result) bool {
+	return func(r record) bool {
 		for _, domain := range domains {
 			if strings.HasSuffix(r.domain, "."+domain) {
 				return true
@@ -26,11 +26,11 @@ func domainExtFilter(domains ...string) filterFunc {
 }

 func domainFilter(domain string) filterFunc {
-	return func(r result) bool {
+	return func(r record) bool {
 		return strings.Contains(r.domain, domain)
 	}
 }

-func orgDomainsFilter(r result) bool {
+func orgDomainsFilter(r record) bool {
 	return strings.HasSuffix(r.domain, ".org")
 }

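The filters above are plain functions over a record, so they compose with ordinary function wrapping. A minimal, self-contained sketch of that composition; the trimmed-down record type and the sample data here are illustrative, not part of the repository:

package main

import (
	"fmt"
	"strings"
)

// a trimmed-down record with only the fields the filters read (illustrative)
type record struct{ domain, page string }

type filterFunc func(record) bool

// notUsing inverts another filter, as in the diff above.
func notUsing(filter filterFunc) filterFunc {
	return func(r record) bool { return !filter(r) }
}

// domainFilter matches records whose domain contains the given text.
func domainFilter(domain string) filterFunc {
	return func(r record) bool { return strings.Contains(r.domain, domain) }
}

func main() {
	// made-up sample records
	records := []record{
		{domain: "learngo.li", page: "/courses"},
		{domain: "golang.org", page: "/doc"},
	}

	// keep everything that is NOT on golang.org
	keep := notUsing(domainFilter("golang.org"))

	for _, r := range records {
		if keep(r) {
			fmt.Println(r.domain, r.page)
		}
	}
}
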
@@ -12,21 +12,21 @@ import (
 )

 type group struct {
-	sum map[string]result // metrics per group key
+	sum map[string]record // metrics per group key
 	keys []string // unique group keys
 	key groupFunc
 }

 func groupBy(key groupFunc) *group {
 	return &group{
-		sum: make(map[string]result),
+		sum: make(map[string]record),
 		key: key,
 	}
 }

-// digest the results
-func (g *group) digest(results iterator) error {
-	return results.each(func(r result) {
+// digest the records
+func (g *group) digest(records iterator) error {
+	return records.each(func(r record) {
 		k := g.key(r)

 		if _, ok := g.sum[k]; !ok {
@@ -37,8 +37,8 @@ func (g *group) digest(results iterator) error {
 	})
 }

-// each yields the grouped results
-func (g *group) each(yield resultFn) error {
+// each yields the grouped records
+func (g *group) each(yield recordFn) error {
 	sort.Strings(g.keys)

 	for _, k := range g.keys {

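groupBy folds every record into a per-key sum. A rough standalone sketch of what that accumulation amounts to, assuming record.sum adds visits and uniques as shown in the record diff further down; the input data is made up:

package main

import "fmt"

// illustrative copy of the record type and its sum method from this commit
type record struct {
	domain  string
	page    string
	visits  int
	uniques int
}

func (r record) sum(other record) record {
	r.visits += other.visits
	r.uniques += other.uniques
	return r
}

func main() {
	// made-up input records
	in := []record{
		{"learngo.li", "/courses", 10, 5},
		{"learngo.li", "/courses", 7, 3},
		{"golang.org", "/doc", 4, 2},
	}

	// group by domain+page: the same idea as pageGrouper feeding group.digest
	sum := map[string]record{}
	for _, r := range in {
		k := r.domain + r.page
		sum[k] = sum[k].sum(r)
	}

	for k, r := range sum {
		fmt.Printf("%s: %d visits, %d uniques\n", k, r.visits, r.uniques)
	}
}
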
@@ -1,15 +1,15 @@
 package main

-type groupFunc func(result) string
+type groupFunc func(record) string

 // domainGrouper groups by domain.
 // but it keeps the other fields.
 // for example: it returns pages as well, but you shouldn't use them.
 // exercise: write a function that erases the unnecessary data.
-func domainGrouper(r result) string {
+func domainGrouper(r record) string {
 	return r.domain
 }

-func pageGrouper(r result) string {
+func pageGrouper(r record) string {
 	return r.domain + r.page
 }

@@ -21,13 +21,13 @@ func newJSONLog(r io.Reader) *jsonLog {
 	return &jsonLog{reader: r}
 }

-func (j *jsonLog) each(yield resultFn) error {
+func (j *jsonLog) each(yield recordFn) error {
 	defer readClose(j.reader)

 	dec := json.NewDecoder(bufio.NewReader(j.reader))

 	for {
-		var r result
+		var r record

 		err := dec.Decode(&r)
 		if err == io.EOF {

@@ -15,8 +15,8 @@ type logCount struct {
 	n int
 }

-func (lc *logCount) each(yield resultFn) error {
-	err := lc.iterator.each(func(r result) {
+func (lc *logCount) each(yield recordFn) error {
+	err := lc.iterator.each(func(r record) {
 		lc.n++
 		yield(r)
 	})

@@ -13,9 +13,9 @@ import (
 	"strings"
 )

-type resultFn func(result)
+type recordFn func(record)

-type iterator interface{ each(resultFn) error }
+type iterator interface{ each(recordFn) error }
 type digester interface{ digest(iterator) error }

 type transform interface {

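After the rename, the pipeline contracts read: an iterator pushes each record into a recordFn callback, and a digester consumes an iterator. A minimal sketch of the two sides wired together; the slice-backed iterator and the visit-counting digester are illustrative stand-ins, not code from this repository:

package main

import "fmt"

// a trimmed-down record for illustration
type record struct {
	domain string
	visits int
}

// the renamed callback and interfaces, as in the diff above
type recordFn func(record)

type iterator interface{ each(recordFn) error }
type digester interface{ digest(iterator) error }

// sliceLog is a made-up iterator backed by an in-memory slice.
type sliceLog []record

func (s sliceLog) each(yield recordFn) error {
	for _, r := range s {
		yield(r)
	}
	return nil
}

// totalVisits is a made-up digester that sums visits from any iterator.
type totalVisits struct{ n int }

func (t *totalVisits) digest(records iterator) error {
	return records.each(func(r record) { t.n += r.visits })
}

func main() {
	src := sliceLog{{"learngo.li", 10}, {"golang.org", 4}}

	var t totalVisits
	if err := t.digest(src); err != nil {
		fmt.Println("digest error:", err)
		return
	}
	fmt.Println("total visits:", t.n)
}
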
@@ -10,21 +10,21 @@ import (

 const fieldsLength = 4

-type result struct {
+type record struct {
 	domain string
 	page string
 	visits int
 	uniques int
 }

-func (r result) sum(other result) result {
+func (r record) sum(other record) record {
 	r.visits += other.visits
 	r.uniques += other.uniques
 	return r
 }

-// UnmarshalText to a *result
-func (r *result) UnmarshalText(p []byte) (err error) {
+// UnmarshalText to a *record
+func (r *record) UnmarshalText(p []byte) (err error) {
 	fields := strings.Fields(string(p))
 	if len(fields) != fieldsLength {
 		return fmt.Errorf("wrong number of fields %q", fields)
@@ -41,8 +41,8 @@ func (r *result) UnmarshalText(p []byte) (err error) {
 	return validate(*r)
 }

-// UnmarshalJSON to a *result
-func (r *result) UnmarshalJSON(data []byte) error {
+// UnmarshalJSON to a *record
+func (r *record) UnmarshalJSON(data []byte) error {
 	var re struct {
 		Domain string
 		Page string
@@ -54,7 +54,7 @@ func (r *result) UnmarshalJSON(data []byte) error {
 		return err
 	}

-	*r = result{re.Domain, re.Page, re.Visits, re.Uniques}
+	*r = record{re.Domain, re.Page, re.Visits, re.Uniques}
 	return validate(*r)
 }

@@ -62,21 +62,21 @@ func (r *result) UnmarshalJSON(data []byte) error {
 func parseStr(name, v string) (int, error) {
 	n, err := strconv.Atoi(v)
 	if err != nil {
-		return 0, fmt.Errorf("result.UnmarshalText %q: %v", name, err)
+		return 0, fmt.Errorf("record.UnmarshalText %q: %v", name, err)
 	}
 	return n, nil
 }

-func validate(r result) (err error) {
+func validate(r record) (err error) {
 	switch {
 	case r.domain == "":
-		err = errors.New("result.domain cannot be empty")
+		err = errors.New("record.domain cannot be empty")
 	case r.page == "":
-		err = errors.New("result.page cannot be empty")
+		err = errors.New("record.page cannot be empty")
 	case r.visits < 0:
-		err = errors.New("result.visits cannot be negative")
+		err = errors.New("record.visits cannot be negative")
 	case r.uniques < 0:
-		err = errors.New("result.uniques cannot be negative")
+		err = errors.New("record.uniques cannot be negative")
 	}
 	return
 }

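record.UnmarshalText expects four whitespace-separated fields (fieldsLength = 4): domain, page, visits, uniques. A rough standalone sketch of that parsing path with the helpers inlined; the sample log line is made up:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type record struct {
	domain  string
	page    string
	visits  int
	uniques int
}

// parseLine mirrors the shape of record.UnmarshalText: split into four
// fields, then convert the two counters to integers.
func parseLine(line string) (record, error) {
	fields := strings.Fields(line)
	if len(fields) != 4 {
		return record{}, fmt.Errorf("wrong number of fields %q", fields)
	}

	visits, err := strconv.Atoi(fields[2])
	if err != nil {
		return record{}, err
	}
	uniques, err := strconv.Atoi(fields[3])
	if err != nil {
		return record{}, err
	}

	return record{fields[0], fields[1], visits, uniques}, nil
}

func main() {
	// made-up log line: domain page visits uniques
	r, err := parseLine("learngo.li /courses 100 50")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%+v\n", r)
}
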
@@ -20,13 +20,13 @@ func newTextLog(r io.Reader) *textLog {
 	return &textLog{reader: r}
 }

-func (p *textLog) each(yield resultFn) error {
+func (p *textLog) each(yield recordFn) error {
 	defer readClose(p.reader)

 	in := bufio.NewScanner(p.reader)

 	for in.Scan() {
-		r := new(result)
+		r := new(record)

 		if err := r.UnmarshalText(in.Bytes()); err != nil {
 			return err

@@ -27,7 +27,7 @@ func newTextReport() *textReport {
 	return new(textReport)
 }

-func (s *textReport) digest(results iterator) error {
+func (s *textReport) digest(records iterator) error {
 	w := tabwriter.NewWriter(os.Stdout, minWidth, tabWidth, padding, ' ', flags)

 	write := fmt.Fprintf
@@ -35,8 +35,8 @@ func (s *textReport) digest(results iterator) error {
 	write(w, "DOMAINS\tPAGES\tVISITS\tUNIQUES\n")
 	write(w, "-------\t-----\t------\t-------\n")

-	var total result
-	results.each(func(r result) {
+	var total record
+	records.each(func(r record) {
 		total = total.sum(r)

 		write(w, "%s\t%s\t%d\t%d\n", r.domain, r.page, r.visits, r.uniques)

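textReport leans on text/tabwriter to align the columns; minWidth, tabWidth, padding, and flags are constants defined elsewhere in the file and not shown in this diff. A small standalone sketch of the same idea, with made-up values for those constants and a made-up row:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// illustrative values; the real constants live elsewhere in the report file
	const (
		minWidth = 0
		tabWidth = 4
		padding  = 2
		flags    = 0
	)

	w := tabwriter.NewWriter(os.Stdout, minWidth, tabWidth, padding, ' ', flags)

	fmt.Fprintf(w, "DOMAINS\tPAGES\tVISITS\tUNIQUES\n")
	fmt.Fprintf(w, "-------\t-----\t------\t-------\n")
	fmt.Fprintf(w, "%s\t%s\t%d\t%d\n", "learngo.li", "/courses", 100, 50)

	// Flush writes the aligned table to stdout.
	w.Flush()
}
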