diff --git a/go.mod b/go.mod
index 7821424..7a91a22 100644
--- a/go.mod
+++ b/go.mod
@@ -18,6 +18,7 @@ require (
github.com/go-chi/render v1.0.1
github.com/go-git/go-git/v5 v5.2.0
github.com/google/gofuzz v1.2.0 // indirect
+ github.com/google/uuid v1.1.2
github.com/gorilla/mux v1.8.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
diff --git a/go.sum b/go.sum
index 06f6ea0..8fc3717 100644
--- a/go.sum
+++ b/go.sum
@@ -162,6 +162,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
diff --git a/pkg/models/models.go b/pkg/models/models.go
index cfbf0b4..741bb82 100644
--- a/pkg/models/models.go
+++ b/pkg/models/models.go
@@ -3,6 +3,7 @@ package models
import (
"errors"
"github.com/go-git/go-git/v5/plumbing"
+ log "github.com/sirupsen/logrus"
"net/http"
"time"
)
@@ -69,3 +70,57 @@ func (u *User) Bind(*http.Request) error {
return nil
}
+
+type Job struct {
+ PackageName string
+ Status BuildStatus
+ Logs BuildLog `json:",omitempty"`
+ Uuid string
+ Time time.Time
+}
+
+// logsToKeep are the number of log lines to keep when sending a job.
+const logsToKeep = 10
+
+func (j Job) Render(w http.ResponseWriter, r *http.Request) error {
+ // Remove everything but the last 10 log lines. To get all
+ // logs the /job/{uuid}/logs route can be used. This is because
+ // the logs can get quite large, and if you want information about a single
+ // job it's not really useful to get all the logs. This is especially true
+ // when retrieving *all* jobs. In that case you really don't want all logs to
+ // be sent over as well
+ if len(j.Logs) > logsToKeep {
+ j.Logs = j.Logs[len(j.Logs)-logsToKeep:]
+ }
+
+ return nil
+}
+
+type BuildStatus int
+
+const (
+ BuildStatusPending BuildStatus = iota
+ BuildStatusPullingRepo
+ BuildStatusRunning
+ BuildStatusUploading
+ BuildStatusDone
+
+ BuildStatusErrored
+)
+
+type LogLine struct {
+	Time    time.Time
+	Level   log.Level
+	Message string // exported: encoding/json and encoding/gob silently drop unexported fields, losing the log text
+}
+
+func (j LogLine) Bind(r *http.Request) error {
+ return nil
+}
+
+type BuildLog []LogLine
+
+func (j LogLine) Render(w http.ResponseWriter, r *http.Request) error {
+
+ return nil
+}
diff --git a/pkg/store/jobstore.go b/pkg/store/jobstore.go
new file mode 100644
index 0000000..242abca
--- /dev/null
+++ b/pkg/store/jobstore.go
@@ -0,0 +1,217 @@
+package store
+
+import (
+ "bytes"
+ "encoding/gob"
+ "github.com/dgraph-io/badger/v2"
+ "github.com/finitum/AAAAA/pkg/models"
+ "github.com/google/uuid"
+ "github.com/pkg/errors"
+ "sync"
+ "time"
+)
+
+type JobStoreWrapper struct {
+ *Badger
+
+ sync.Mutex
+ callbacks map[string][]func(line *models.LogLine)
+}
+
+func NewJobStore(badger *Badger) JobStore {
+ return &JobStoreWrapper{
+ Badger: badger,
+ callbacks: make(map[string][]func(line *models.LogLine)),
+ }
+}
+
+func (b *JobStoreWrapper) NewJob(name string) (*models.Job, error) {
+ jid, err := uuid.NewUUID()
+ if err != nil {
+ return nil, errors.Wrap(err, "uuid")
+ }
+
+ job := models.Job{
+ PackageName: name,
+ Status: models.BuildStatusPending,
+ Logs: nil,
+ Uuid: jid.String(),
+ Time: time.Now(),
+ }
+
+ err = b.db.Update(func(txn *badger.Txn) error {
+ var value bytes.Buffer
+
+ enc := gob.NewEncoder(&value)
+ err := enc.Encode(job)
+ if err != nil {
+ return errors.Wrap(err, "gob encode")
+ }
+
+ entry := badger.NewEntry([]byte(jobPrefix+jid.String()), value.Bytes()).WithTTL(jobTTL)
+ return errors.Wrap(txn.SetEntry(entry), "badger transaction")
+ })
+
+ return &job, err
+}
+
+func (b *JobStoreWrapper) AppendToJobLog(jid string, l *models.LogLine) error {
+	for _, cb := range b.callbacks[jid] {
+		cb(l)
+	}
+	// NOTE(review): callbacks is read here without b.Lock while AddLogListener writes it under the lock — possible data race; confirm call sites / run with -race.
+	return b.db.Update(func(txn *badger.Txn) error {
+		var job models.Job
+
+		// Get the job
+		item, err := txn.Get([]byte(jobPrefix + jid))
+		if err == badger.ErrKeyNotFound {
+			return ErrNotExists
+		} else if err != nil {
+			return errors.Wrap(err, "badger get")
+		}
+		err = item.Value(func(val []byte) error {
+			buf := bytes.NewBuffer(val)
+
+			dec := gob.NewDecoder(buf)
+			return errors.Wrap(dec.Decode(&job), "gob decode")
+		})
+		if err != nil {
+			return err
+		}
+
+		// Update the job
+		job.Logs = append(job.Logs, *l)
+
+		// Put the job back
+		var value bytes.Buffer
+		enc := gob.NewEncoder(&value)
+		err = enc.Encode(job)
+		if err != nil {
+			return errors.Wrap(err, "gob encode")
+		}
+
+		entry := badger.NewEntry([]byte(jobPrefix+jid), value.Bytes()).WithTTL(jobTTL)
+		return errors.Wrap(txn.SetEntry(entry), "badger transaction")
+	})
+}
+
+func (b *JobStoreWrapper) SetJobStatus(jid string, status models.BuildStatus) error {
+ return b.db.Update(func(txn *badger.Txn) error {
+ var job models.Job
+
+ // Get the job
+ item, err := txn.Get([]byte(jobPrefix + jid))
+ if err == badger.ErrKeyNotFound {
+ return ErrNotExists
+ } else if err != nil {
+ return errors.Wrap(err, "badger get")
+ }
+ err = item.Value(func(val []byte) error {
+ buf := bytes.NewBuffer(val)
+
+ dec := gob.NewDecoder(buf)
+ return errors.Wrap(dec.Decode(&job), "gob decode")
+ })
+ if err != nil {
+ return err
+ }
+
+ // Update the job
+ job.Status = status
+
+ // Put the job back
+ var value bytes.Buffer
+ enc := gob.NewEncoder(&value)
+ err = enc.Encode(job)
+ if err != nil {
+ return errors.Wrap(err, "gob encode")
+ }
+
+ entry := badger.NewEntry([]byte(jobPrefix+jid), value.Bytes()).WithTTL(jobTTL)
+ return errors.Wrap(txn.SetEntry(entry), "badger transaction")
+ })
+}
+
+func (b *JobStoreWrapper) GetLogs(jid string) (logs []models.LogLine, _ error) {
+ return logs, b.db.View(func(txn *badger.Txn) error {
+ var job models.Job
+
+ // Get the job
+ item, err := txn.Get([]byte(jobPrefix + jid))
+ if err == badger.ErrKeyNotFound {
+ return ErrNotExists
+ } else if err != nil {
+ return errors.Wrap(err, "badger get")
+ }
+ err = item.Value(func(val []byte) error {
+ buf := bytes.NewBuffer(val)
+
+ dec := gob.NewDecoder(buf)
+ return errors.Wrap(dec.Decode(&job), "gob decode")
+ })
+ if err != nil {
+ return err
+ }
+
+ logs = job.Logs
+
+ return nil
+ })
+}
+
+func (b *JobStoreWrapper) GetJobs() (jobs []models.Job, _ error) {
+	return jobs, b.db.View(func(txn *badger.Txn) error {
+		it := txn.NewIterator(badger.DefaultIteratorOptions)
+		defer it.Close()
+		prefix := []byte(jobPrefix)
+		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
+			item := it.Item()
+			var job models.Job
+			err := item.Value(func(val []byte) error {
+				buf := bytes.NewBuffer(val)
+
+				dec := gob.NewDecoder(buf)
+				return errors.Wrap(dec.Decode(&job), "gob decode")
+			})
+			if err != nil {
+				return errors.Wrap(err, "badger iteration")
+			}
+			// Only keep the job once it decoded successfully.
+			jobs = append(jobs, job)
+		}
+		return nil
+	})
+}
+
+func (b *JobStoreWrapper) AddLogListener(uuid string, cb func(line *models.LogLine)) {
+ b.Lock()
+ defer b.Unlock()
+
+ b.callbacks[uuid] = append(b.callbacks[uuid], cb)
+}
+
+func (b *JobStoreWrapper) GetJob(jid string) (*models.Job, error) {
+ var job models.Job
+
+ return &job, b.db.View(func(txn *badger.Txn) error {
+
+ // Get the job
+ item, err := txn.Get([]byte(jobPrefix + jid))
+ if err == badger.ErrKeyNotFound {
+ return ErrNotExists
+ } else if err != nil {
+ return errors.Wrap(err, "badger get")
+ }
+ err = item.Value(func(val []byte) error {
+ buf := bytes.NewBuffer(val)
+
+ dec := gob.NewDecoder(buf)
+ return errors.Wrap(dec.Decode(&job), "gob decode")
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+}
diff --git a/pkg/store/store.go b/pkg/store/store.go
index 14f31b5..b601e95 100644
--- a/pkg/store/store.go
+++ b/pkg/store/store.go
@@ -18,6 +18,7 @@ type Store interface {
}
const pkgPrefix = "pkg_"
+
type PackageStore interface {
// GetPackage gets a package definition from the store MUST return ErrNotExists if the package does not exist
GetPackage(name string) (*models.Pkg, error)
@@ -32,6 +33,7 @@ type PackageStore interface {
}
const userPrefix = "user_"
+
type UserStore interface {
// GetUser gets a user from the store MUST return ErrNotExists if the user does not exist
GetUser(name string) (*models.User, error)
@@ -88,3 +90,33 @@ func GetPartialCacheEntry(cache Cache, term string) (aur.Results, bool, error) {
return nil, false, ErrNotExists
}
+
+const jobPrefix = "job_"
+
+// Keep job logs for 10 days
+const jobTTL = 10 * 24 * time.Hour
+
+type JobStore interface {
+ // NewJob creates a new job. It returns the newly created job, with in it the
+ // uuid of the job which can be used for further lookup.
+ NewJob(name string) (*models.Job, error)
+
+ // AppendToJobLog appends a line to a job's log
+ AppendToJobLog(uuid string, l *models.LogLine) error
+
+ // SetJobStatus updates the status of this job
+ SetJobStatus(uuid string, status models.BuildStatus) error
+
+ // GetLogs returns the entire log of this job
+ GetLogs(uuid string) ([]models.LogLine, error)
+
+ // GetJobs returns all jobs
+ GetJobs() ([]models.Job, error)
+
+ // AddLogListener takes a function which will be called every time a new logline is
+ // added the job targeted with the uuid
+ AddLogListener(uuid string, cb func(line *models.LogLine))
+
+ // GetJob gets a job by uuid
+ GetJob(uuid string) (*models.Job, error)
+}
diff --git a/services/buildlogs/filter.go b/services/buildlogs/filter.go
new file mode 100644
index 0000000..e5467c6
--- /dev/null
+++ b/services/buildlogs/filter.go
@@ -0,0 +1,84 @@
+package main
+
+import (
+ "github.com/finitum/AAAAA/pkg/models"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// FilterJobs filters, sorts and paginates a list of jobs.
+//
+// * nameFilter is a keyword which if non-empty, must be included in the package name, or otherwise the job is filtered out.
+// * statusFilter is a keyword which if non-empty, filters out any job that doesn't have a build status equal to the number
+// passed in. This parameter may start with a `!`, which negates the filter and filters out any job with a status equal to
+// the number passed in.
+// * sortKey is the key the remaining list of (filtered) jobs is sorted by before it's paginated. This parameter may
+// be either `time` or `name`. If this parameter isn't `time` or `name` it will automatically be sorted by time.
+// * start and limit are used for pagination. Jobs returned are the filtered, sorted jobs sliced by [start:start+limit].
+// limit may be negative (e.g. -1) to signify no limit.
+func FilterJobs(jobs []models.Job, nameFilter, statusFilter, sortKey string, start, limit int) ([]models.Job, error) {
+	if statusFilter != "" {
+		reverse := false
+		if statusFilter[0] == '!' {
+			statusFilter = statusFilter[1:]
+			reverse = true
+		}
+
+		statusNumber, err := strconv.Atoi(statusFilter)
+		if err != nil {
+			return nil, err
+		}
+
+		fc := 0
+		for _, job := range jobs {
+			if (job.Status == models.BuildStatus(statusNumber) && !reverse) ||
+				(job.Status != models.BuildStatus(statusNumber) && reverse) {
+				jobs[fc] = job
+				fc++
+			}
+		}
+		jobs = jobs[:fc]
+	}
+
+	if nameFilter != "" {
+		fc := 0
+		for _, job := range jobs {
+			if strings.Contains(job.PackageName, nameFilter) {
+				jobs[fc] = job
+				fc++
+			}
+		}
+		jobs = jobs[:fc]
+	}
+
+	sort.Slice(jobs, func(i, j int) bool {
+		switch sortKey {
+		case "name":
+			name1 := jobs[i].PackageName
+			name2 := jobs[j].PackageName
+			return name1 < name2
+		case "time":
+			fallthrough
+		default:
+			return jobs[i].Time.After(jobs[j].Time)
+		}
+	})
+
+	if len(jobs) > start {
+		// Treat any negative limit as "no limit" (previously only -1; other
+		// negatives made end < start and panicked on the slice below).
+		if limit < 0 {
+			jobs = jobs[start:]
+		} else {
+			end := start + limit
+			if end > len(jobs) {
+				end = len(jobs)
+			}
+
+			jobs = jobs[start:end]
+		}
+	} else {
+		jobs = []models.Job{}
+	}
+
+	return jobs, nil
+}
diff --git a/services/buildlogs/filter_test.go b/services/buildlogs/filter_test.go
new file mode 100644
index 0000000..ff9b585
--- /dev/null
+++ b/services/buildlogs/filter_test.go
@@ -0,0 +1,184 @@
+package main
+
+import (
+ "github.com/finitum/AAAAA/pkg/models"
+ "github.com/stretchr/testify/assert"
+ "testing"
+ "time"
+)
+
+var testJobList = []models.Job{
+	{
+		PackageName: "ccc",
+		Status:      0,
+		Logs:        nil,
+		Uuid:        "1",
+		Time:        time.Unix(10, 10),
+	},
+	{
+		PackageName: "aaa",
+		Status:      1,
+		Logs:        nil,
+		Uuid:        "2",
+		Time:        time.Unix(11, 10),
+	},
+	{
+		PackageName: "bbb",
+		Status:      0,
+		Logs:        nil,
+		Uuid:        "3", // was "2": uuids must be unique per job
+		Time:        time.Unix(12, 10),
+	},
+}
+
+func TestFilterLimitOne(t *testing.T) {
+	jobs := make([]models.Job, len(testJobList))
+	copy(jobs, testJobList)
+
+	// By default it sorts based on time. So it should return the latest time (bbb)
+	jobs, err := FilterJobs(jobs, "", "", "", 0, 1)
+	assert.NoError(t, err)
+	assert.Equal(t, len(jobs), 1)
+	assert.Equal(t, jobs[0].PackageName, "bbb")
+}
+
+func TestFilterLimitTwo(t *testing.T) {
+	jobs := make([]models.Job, len(testJobList))
+	copy(jobs, testJobList)
+
+	// By default it sorts based on time. So it should return the latest two times (bbb, aaa)
+	jobs, err := FilterJobs(jobs, "", "", "", 0, 2)
+	assert.NoError(t, err)
+	assert.Equal(t, len(jobs), 2)
+	assert.Equal(t, jobs[0].PackageName, "bbb")
+	assert.Equal(t, jobs[1].PackageName, "aaa")
+}
+
+func TestFilterLimitZero(t *testing.T) {
+ jobs := make([]models.Job, len(testJobList))
+ copy(jobs, testJobList)
+
+ // Limit 0 should return nothing
+ jobs, err := FilterJobs(jobs, "", "", "", 0, 0)
+ assert.NoError(t, err)
+ assert.Equal(t, len(jobs), 0)
+}
+
+func TestFilterStatusOne(t *testing.T) {
+	jobs := make([]models.Job, len(testJobList))
+	copy(jobs, testJobList)
+
+	// There's only one job with status 1 (aaa)
+	jobs, err := FilterJobs(jobs, "", "1", "", 0, -1)
+	assert.NoError(t, err)
+	assert.Equal(t, len(jobs), 1)
+	assert.Equal(t, jobs[0].PackageName, "aaa")
+}
+
+func TestFilterStatusZero(t *testing.T) {
+ jobs := make([]models.Job, len(testJobList))
+ copy(jobs, testJobList)
+
+ // There are two with status 0 (bbb and ccc) but ccc has the lowest time so should come last
+ jobs, err := FilterJobs(jobs, "", "0", "", 0, -1)
+ assert.NoError(t, err)
+ assert.Equal(t, len(jobs), 2)
+ assert.Equal(t, jobs[0].PackageName, "bbb")
+ assert.Equal(t, jobs[1].PackageName, "ccc")
+}
+
+func TestFilterStatusNotZero(t *testing.T) {
+ jobs := make([]models.Job, len(testJobList))
+ copy(jobs, testJobList)
+
+ // There are two with status 0 (bbb and ccc) so !0 should return a
+ jobs, err := FilterJobs(jobs, "", "!0", "", 0, -1)
+ assert.NoError(t, err)
+ assert.Equal(t, len(jobs), 1)
+ assert.Equal(t, jobs[0].PackageName, "aaa")
+}
+
+func TestFilterNameExact(t *testing.T) {
+	jobs := make([]models.Job, len(testJobList))
+	copy(jobs, testJobList)
+
+	// Only aaa matches the exact name filter "aaa"
+	jobs, err := FilterJobs(jobs, "aaa", "", "", 0, -1)
+	assert.NoError(t, err)
+	assert.Equal(t, len(jobs), 1)
+	assert.Equal(t, jobs[0].PackageName, "aaa")
+}
+
+func TestFilterNamePartial(t *testing.T) {
+	jobs := make([]models.Job, len(testJobList))
+	copy(jobs, testJobList)
+
+	// Only aaa contains the letter "a", so the partial filter keeps just that job
+	jobs, err := FilterJobs(jobs, "a", "", "", 0, -1)
+	assert.NoError(t, err)
+	assert.Equal(t, len(jobs), 1)
+	assert.Equal(t, jobs[0].PackageName, "aaa")
+}
+
+func TestFilterSortTime(t *testing.T) {
+ jobs := make([]models.Job, len(testJobList))
+ copy(jobs, testJobList)
+
+ // Sort based on time explicitly
+ jobs, err := FilterJobs(jobs, "", "", "time", 0, -1)
+ assert.NoError(t, err)
+ assert.Equal(t, len(jobs), 3)
+ assert.Equal(t, jobs[0].PackageName, "bbb")
+ assert.Equal(t, jobs[1].PackageName, "aaa")
+ assert.Equal(t, jobs[2].PackageName, "ccc")
+}
+
+func TestFilterSortNothing(t *testing.T) {
+ jobs := make([]models.Job, len(testJobList))
+ copy(jobs, testJobList)
+
+ // Sorting on nothing also sorts on time by default
+ jobs, err := FilterJobs(jobs, "", "", "", 0, -1)
+ assert.NoError(t, err)
+ assert.Equal(t, len(jobs), 3)
+ assert.Equal(t, jobs[0].PackageName, "bbb")
+ assert.Equal(t, jobs[1].PackageName, "aaa")
+ assert.Equal(t, jobs[2].PackageName, "ccc")
+}
+
+func TestFilterSortName(t *testing.T) {
+ jobs := make([]models.Job, len(testJobList))
+ copy(jobs, testJobList)
+
+ jobs, err := FilterJobs(jobs, "", "", "name", 0, -1)
+ assert.NoError(t, err)
+ assert.Equal(t, len(jobs), 3)
+ assert.Equal(t, jobs[0].PackageName, "aaa")
+ assert.Equal(t, jobs[1].PackageName, "bbb")
+ assert.Equal(t, jobs[2].PackageName, "ccc")
+}
+
+func TestFilterStartZero(t *testing.T) {
+ jobs := make([]models.Job, len(testJobList))
+ copy(jobs, testJobList)
+
+ // Starting at zero is the default
+ jobs, err := FilterJobs(jobs, "", "", "name", 0, -1)
+ assert.NoError(t, err)
+ assert.Equal(t, len(jobs), 3)
+ assert.Equal(t, jobs[0].PackageName, "aaa")
+ assert.Equal(t, jobs[1].PackageName, "bbb")
+ assert.Equal(t, jobs[2].PackageName, "ccc")
+}
+
+func TestFilterStartOne(t *testing.T) {
+	jobs := make([]models.Job, len(testJobList))
+	copy(jobs, testJobList)
+
+	// Starting at one skips the first job of the name-sorted result (aaa)
+	jobs, err := FilterJobs(jobs, "", "", "name", 1, -1)
+	assert.NoError(t, err)
+	assert.Equal(t, len(jobs), 2)
+	assert.Equal(t, jobs[0].PackageName, "bbb")
+	assert.Equal(t, jobs[1].PackageName, "ccc")
+}
diff --git a/services/buildlogs/main.go b/services/buildlogs/main.go
new file mode 100644
index 0000000..7cb6751
--- /dev/null
+++ b/services/buildlogs/main.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+ "context"
+ "github.com/finitum/AAAAA/internal/cors"
+ "github.com/finitum/AAAAA/pkg/store"
+ "github.com/go-chi/chi"
+ "github.com/go-chi/chi/middleware"
+ "github.com/go-chi/render"
+ log "github.com/sirupsen/logrus"
+ "net/http"
+ "os"
+)
+
+func init() {
+ log.SetLevel(log.TraceLevel)
+}
+
+func main() {
+	r := chi.NewRouter()
+	r.Use(middleware.StripSlashes)
+	r.Use(middleware.Logger)
+	r.Use(middleware.Recoverer)
+	r.Use(cors.AllowAll)
+	r.Use(render.SetContentType(render.ContentTypeJSON))
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	badger, err := store.OpenBadger(os.TempDir() + "/AAAAA-jobs")
+	if err != nil {
+		log.Fatalf("Couldn't open badger job store: %v", err)
+	}
+	defer badger.Close()
+	badger.StartGC(ctx)
+
+	js := store.NewJobStore(badger)
+	rs := NewRoutes(js)
+
+	r.Post("/job/{pkgname}", rs.NewJob)
+	r.Get("/job/{uuid}", rs.GetJob)
+	r.Get("/jobs", rs.GetJobs)
+	r.Get("/job/{uuid}/logs", rs.GetLogs)
+	r.Post("/job/{uuid}/logs", rs.AddLogs)
+
+	log.Fatal(http.ListenAndServe(":5002", r))
+}
diff --git a/services/buildlogs/routes.go b/services/buildlogs/routes.go
new file mode 100644
index 0000000..aff4cb2
--- /dev/null
+++ b/services/buildlogs/routes.go
@@ -0,0 +1,162 @@
+package main
+
+import (
+ "github.com/finitum/AAAAA/pkg/models"
+ "github.com/finitum/AAAAA/pkg/store"
+ "github.com/finitum/AAAAA/services/control_server/routes"
+ "github.com/go-chi/chi"
+ "github.com/go-chi/render"
+ log "github.com/sirupsen/logrus"
+ "net/http"
+ "strconv"
+)
+
+type Routes struct {
+ jobs store.JobStore
+}
+
+func NewRoutes(j store.JobStore) *Routes {
+ return &Routes{
+ j,
+ }
+}
+
+func (rs *Routes) NewJob(w http.ResponseWriter, r *http.Request) {
+	pkgname := chi.URLParam(r, "pkgname")
+
+	job, err := rs.jobs.NewJob(pkgname)
+	if err != nil {
+		_ = render.Render(w, r, routes.ErrServerError(err))
+		log.Errorf("failed to create new job (%v)", err)
+		return
+	}
+
+	render.Status(r, http.StatusCreated) // status must be set before the body is rendered
+	_ = render.Render(w, r, job)
+}
+
+func (rs *Routes) GetJob(w http.ResponseWriter, r *http.Request) {
+	uuid := chi.URLParam(r, "uuid")
+
+	job, err := rs.jobs.GetJob(uuid)
+	if err != nil {
+		_ = render.Render(w, r, routes.ErrServerError(err)) // NOTE(review): store.ErrNotExists surfaces as 500 here; a 404 mapping is likely intended — confirm
+		log.Errorf("failed to get job (%v)", err)
+		return
+	}
+
+	_ = render.Render(w, r, job)
+}
+
+func (rs *Routes) GetLogs(w http.ResponseWriter, r *http.Request) {
+	uuid := chi.URLParam(r, "uuid")
+
+	dbLogs, err := rs.jobs.GetLogs(uuid)
+	if err != nil {
+		_ = render.Render(w, r, routes.ErrServerError(err))
+		log.Errorf("failed to get logs (%v)", err)
+		return
+	}
+
+	logs := make([]render.Renderer, len(dbLogs))
+	for i, logLine := range dbLogs {
+		logs[i] = logLine
+	}
+
+	_ = render.RenderList(w, r, logs)
+}
+
+// GetJobs possible routes:
+// All jobs: /jobs
+// Get 10 jobs: /jobs?limit=10
+// Get jobs starting at job 10: /jobs?start=10
+// Get only the 10th job: /jobs?start=10&limit=1
+//
+// # Sorting is performed after filtering, before pagination
+// Sort jobs by time: /jobs?sort=time
+// Sort jobs by package name: /jobs?sort=name
+//
+// # Filtering is performed before sorting
+// Return only jobs with status 0 (pending) : /jobs?status=0
+// Return only jobs with a status that's not 0 (pending) : /jobs?status=!0
+// Return only jobs with `aaa` in the name: /jobs?name=aaa
+func (rs *Routes) GetJobs(w http.ResponseWriter, r *http.Request) {
+	limit := r.URL.Query().Get("limit")
+	start := r.URL.Query().Get("start")
+	sortKey := r.URL.Query().Get("sort")
+	statusFilter := r.URL.Query().Get("status")
+	nameFilter := r.URL.Query().Get("name")
+
+	var err error
+
+	startNum := 0
+	if start != "" {
+		startNum, err = strconv.Atoi(start)
+		if err != nil {
+			_ = render.Render(w, r, routes.ErrInvalidRequest(err))
+			log.Errorf("Couldn't convert start to number (%v)", err)
+			return
+		}
+		if startNum < 0 {
+			startNum = 0
+		}
+	}
+
+	limitNum := -1
+	if limit != "" {
+		limitNum, err = strconv.Atoi(limit)
+		if err != nil {
+			_ = render.Render(w, r, routes.ErrInvalidRequest(err))
+			log.Errorf("Couldn't convert limit to number (%v)", err)
+			return
+		}
+		if limitNum < 0 {
+			limitNum = -1
+		}
+	}
+
+	// We will never return more than 5000 results
+	if limitNum > 5000 {
+		limitNum = 5000
+	}
+
+	dbJobs, err := rs.jobs.GetJobs()
+	if err != nil {
+		// TODO: maybe put these error functions in some kind of shared module (internal maybe?)
+		_ = render.Render(w, r, routes.ErrServerError(err))
+		log.Errorf("failed to get jobs (%v)", err)
+		return
+	}
+
+	dbJobs, err = FilterJobs(dbJobs, nameFilter, statusFilter, sortKey, startNum, limitNum)
+	if err != nil {
+		_ = render.Render(w, r, routes.ErrInvalidRequest(err))
+		log.Errorf("failed to filter jobs (%v)", err)
+		return
+	}
+
+	jobs := make([]render.Renderer, len(dbJobs))
+	for i, job := range dbJobs {
+		jobs[i] = job
+	}
+
+	_ = render.RenderList(w, r, jobs)
+}
+
+func (rs *Routes) AddLogs(w http.ResponseWriter, r *http.Request) {
+ uuid := chi.URLParam(r, "uuid")
+
+ var logLine models.LogLine
+
+ if err := render.Bind(r, &logLine); err != nil {
+ _ = render.Render(w, r, routes.ErrInvalidRequest(err))
+ return
+ }
+
+ err := rs.jobs.AppendToJobLog(uuid, &logLine)
+ if err != nil {
+ _ = render.Render(w, r, routes.ErrServerError(err))
+ log.Errorf("failed to add to logs (%v)", err)
+ return
+ }
+}
diff --git a/services/frontend/src/App.vue b/services/frontend/src/App.vue
index 5503ef3..44ad6bd 100644
--- a/services/frontend/src/App.vue
+++ b/services/frontend/src/App.vue
@@ -8,8 +8,8 @@
+
+
diff --git a/services/frontend/src/components/PackageTable.vue b/services/frontend/src/components/tables/PackageTable.vue
similarity index 96%
rename from services/frontend/src/components/PackageTable.vue
rename to services/frontend/src/components/tables/PackageTable.vue
index 074aaeb..da85d12 100644
--- a/services/frontend/src/components/PackageTable.vue
+++ b/services/frontend/src/components/tables/PackageTable.vue
@@ -59,9 +59,9 @@
import { defineComponent, ref } from "vue";
import { frequencyToDuration, Package } from "@/api/Models";
import { FontAwesomeIcon } from "@fortawesome/vue-fontawesome";
-import UpdatePackage from "@/components/UpdatePackage.vue";
+import UpdatePackage from "@/components/modals/UpdatePackage.vue";
import { DeletePackage, loggedIn } from "@/api/API";
-import Dialog from "@/components/Dialog.vue";
+import Dialog from "@/components/modals/Dialog.vue";
import { loadPackages, packages } from "@/api/packages";
export default defineComponent({
diff --git a/services/frontend/src/components/UsersTable.vue b/services/frontend/src/components/tables/UsersTable.vue
similarity index 97%
rename from services/frontend/src/components/UsersTable.vue
rename to services/frontend/src/components/tables/UsersTable.vue
index 68b4b27..2160fde 100644
--- a/services/frontend/src/components/UsersTable.vue
+++ b/services/frontend/src/components/tables/UsersTable.vue
@@ -68,9 +68,9 @@
import { defineComponent, ref } from "vue";
import { User } from "@/api/Models";
import { FontAwesomeIcon } from "@fortawesome/vue-fontawesome";
-import Dialog from "@/components/Dialog.vue";
+import Dialog from "@/components/modals/Dialog.vue";
import { DeleteUser } from "@/api/API";
-import Login from "@/components/Login.vue";
+import Login from "@/components/modals/Login.vue";
import { loadUsers, users } from "@/api/users";
export default defineComponent({
diff --git a/services/frontend/src/router/index.ts b/services/frontend/src/router/index.ts
index 07a1849..5146ae2 100644
--- a/services/frontend/src/router/index.ts
+++ b/services/frontend/src/router/index.ts
@@ -10,6 +10,11 @@ const routes: Array = [
path: "/users",
name: "Users",
component: import("../views/Users.vue")
+ },
+ {
+ path: "/jobs",
+ name: "Jobs",
+ component: import("../views/Jobs.vue")
}
];
diff --git a/services/frontend/src/views/Home.vue b/services/frontend/src/views/Home.vue
index 4753c6a..8b6a302 100644
--- a/services/frontend/src/views/Home.vue
+++ b/services/frontend/src/views/Home.vue
@@ -16,7 +16,7 @@
diff --git a/services/frontend/src/views/Users.vue b/services/frontend/src/views/Users.vue
index e12c7c6..ded56aa 100644
--- a/services/frontend/src/views/Users.vue
+++ b/services/frontend/src/views/Users.vue
@@ -15,7 +15,7 @@