-
-
Notifications
You must be signed in to change notification settings - Fork 26
Thoughts on wasm target run via cloudflare workers or similar #160
Replies: 1 comment · 3 replies
-
|
I'm surprised that it's possible to compile parts of this project to wasm, and configuring the runner actually works in Node wasm via the API (Node 20 + Go 1.21.1). What doesn't work is
So yeah, it's possible to create a JS wasm runner using this project. Just the code for executing the steps is missing. Code to evaluate GitHub Actions expressions can be taken from nektos/act. Are you interested in creating code for running steps in the embedded JS runtime? |
Beta Was this translation helpful? Give feedback.
All reactions
-
|
You have to patch the job_logger.go
package logger
import (
"bytes"
"context"
"fmt"
"io"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/ChristopherHX/github-act-runner/protocol"
"github.com/ChristopherHX/github-act-runner/protocol/results"
"nhooyr.io/websocket"
"nhooyr.io/websocket/wsjson"
)
// LiveLogger streams batches of timeline log lines to some backend and
// releases any underlying connection on Close.
type LiveLogger interface {
	io.Closer
	SendLog(lines *protocol.TimelineRecordFeedLinesWrapper) error
}
// VssLiveLogger delivers live log lines over the classic VSS REST API
// using the job's plan and timeline identifiers.
type VssLiveLogger struct {
	JobRequest *protocol.AgentJobRequestMessage
	Connection *protocol.VssConnection
}
// Close is a no-op: the VSS logger sends over the shared connection
// owned by Connection, so there is nothing of its own to tear down.
func (*VssLiveLogger) Close() error {
	return nil
}
// SendLog forwards one batch of feed lines to the VSS log-lines endpoint.
func (logger *VssLiveLogger) SendLog(wrapper *protocol.TimelineRecordFeedLinesWrapper) error {
	return logger.Connection.SendLogLines(logger.JobRequest.Plan, logger.JobRequest.Timeline.ID, wrapper)
}
// WebsocketLivelogger delivers live log lines over a websocket feed.
// ws is nil while disconnected; Connect establishes the session.
type WebsocketLivelogger struct {
	JobRequest    *protocol.AgentJobRequestMessage
	Connection    *protocol.VssConnection
	ws            *websocket.Conn
	FeedStreamUrl string
}
// Close shuts down the active websocket session, if any, with a normal
// "going away" status. Calling Close while disconnected is a no-op.
func (logger *WebsocketLivelogger) Close() error {
	if logger.ws == nil {
		return nil
	}
	conn := logger.ws
	logger.ws = nil
	return conn.Close(websocket.StatusGoingAway, "Bye!")
}
// Connect (re)dials the feed websocket. The http(s) scheme of
// FeedStreamUrl is rewritten to ws(s) and the dial is bounded by a
// five-minute timeout.
func (logger *WebsocketLivelogger) Connect() error {
	// Drop any previous session first; a close failure is only traced.
	err := logger.Close()
	if err != nil && logger.Connection.Trace {
		fmt.Printf("Failed to close old websocket connection %s\n", err.Error())
	}
	if logger.Connection.Trace {
		fmt.Printf("Try to connect to websocket %s\n", logger.FeedStreamUrl)
	}
	// http -> ws, https -> wss (case-insensitive scheme match).
	re := regexp.MustCompile("(?i)^http(s?)://")
	feedStreamUrl, err := url.Parse(re.ReplaceAllString(logger.FeedStreamUrl, "ws$1://"))
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
	defer cancel()
	// NOTE(review): the authorization headers are commented out, so this
	// dial is unauthenticated — confirm this is intended for the wasm
	// build (custom dial headers may be unavailable there).
	logger.ws, _, err = websocket.Dial(ctx, feedStreamUrl.String(), &websocket.DialOptions{
		// HTTPClient: logger.Connection.HttpClient(),
		// HTTPHeader: http.Header{
		// "Authorization": []string{"Bearer " + logger.Connection.Token},
		// "User-Agent": []string{"github-act-runner/1.0.0"},
		// },
	})
	return err
}
// SendLog writes one batch of feed lines to the websocket as JSON,
// bounded by a five-minute timeout. Requires a prior successful Connect.
func (logger *WebsocketLivelogger) SendLog(lines *protocol.TimelineRecordFeedLinesWrapper) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
	defer cancel()
	return wsjson.Write(ctx, logger.ws, lines)
}
// WebsocketLiveloggerWithFallback prefers the websocket live logger and
// falls back to the VSS REST logger on failure, unless ForceWebsock
// forbids the fallback. currentLogger is the active delegate (nil until
// Initialize runs).
type WebsocketLiveloggerWithFallback struct {
	JobRequest    *protocol.AgentJobRequestMessage
	Connection    *protocol.VssConnection
	currentLogger LiveLogger
	FeedStreamUrl string
	ForceWebsock  bool
}
// InitializeVssLogger drops the active delegate (if any) and installs a
// plain VSS REST logger for this job.
func (logger *WebsocketLiveloggerWithFallback) InitializeVssLogger() {
	logger.Close()
	vss := &VssLiveLogger{
		JobRequest: logger.JobRequest,
		Connection: logger.Connection,
	}
	logger.currentLogger = vss
}
// Initialize selects the live logger delegate for this job.
//
// The websocket path was compiled out in this patch (the original code
// wrapped it in an unreachable `if false { ... }` branch, removed here
// without behavior change), so the REST-based VSS logger is always
// installed unless ForceWebsock is set. NOTE(review): with
// ForceWebsock == true nothing is installed and currentLogger stays
// nil, which makes SendLog dereference a nil delegate — confirm
// ForceWebsock is never used with this patch applied.
func (logger *WebsocketLiveloggerWithFallback) Initialize() {
	logger.Close()
	if !logger.ForceWebsock {
		logger.InitializeVssLogger()
	}
}
// Close tears down the active delegate, if any, and leaves the logger
// uninitialized so the next SendLog re-runs Initialize.
func (logger *WebsocketLiveloggerWithFallback) Close() error {
	active := logger.currentLogger
	if active == nil {
		return nil
	}
	logger.currentLogger = nil
	return active.Close()
}
// SendLog forwards a batch of log lines to the active delegate, lazily
// initializing one on first use. If the send fails and the delegate is
// the websocket logger, it reconnects and retries once; unless
// ForceWebsock is set, persistent failures fall back to the VSS logger.
//
// Fix: the original named the type-assertion bool `err`, shadowing the
// outer error variable — renamed to the idiomatic `ok` (no behavior
// change).
func (logger *WebsocketLiveloggerWithFallback) SendLog(wrapper *protocol.TimelineRecordFeedLinesWrapper) error {
	if logger.currentLogger == nil {
		logger.Initialize()
	}
	err := logger.currentLogger.SendLog(wrapper)
	if err != nil {
		if logger.Connection.Trace {
			fmt.Printf("Failed to send webconsole log %s\n", err.Error())
		}
		// Only the websocket delegate can be repaired in place.
		if wslogger, ok := logger.currentLogger.(*WebsocketLivelogger); ok {
			if err := wslogger.Connect(); err != nil {
				if !logger.ForceWebsock {
					if logger.Connection.Trace {
						fmt.Printf("Failed to reconnect to websocket %s, fallback to vsslogger\n", err.Error())
					}
					logger.InitializeVssLogger()
					return logger.currentLogger.SendLog(wrapper)
				}
				return err
			}
			if err := logger.currentLogger.SendLog(wrapper); err != nil {
				if !logger.ForceWebsock {
					if logger.Connection.Trace {
						fmt.Printf("Failed to send webconsole log %s, fallback to vsslogger\n", err.Error())
					}
					logger.InitializeVssLogger()
					return logger.currentLogger.SendLog(wrapper)
				}
				return err
			}
			return nil
		}
	}
	return err
}
// BufferedLiveLogger wraps another LiveLogger and coalesces bursts of
// log lines in a background goroutine before forwarding them.
// logchan carries pending wrappers; logfinished is closed when the
// background sender has drained and exited.
type BufferedLiveLogger struct {
	LiveLogger
	logchan     chan *protocol.TimelineRecordFeedLinesWrapper
	logfinished chan struct{}
}
// sendLogs drains logchan, merging consecutive lines that belong to the
// same step into one wrapper for up to one second before flushing it to
// the wrapped LiveLogger. It closes logfinished once logchan is closed
// and drained. Send errors are deliberately ignored: the live console
// feed is best effort.
//
// Fix: removed the dead `logsexit` flag from the original — it was
// never set to true, so the guarded early return was unreachable
// (no behavior change).
func (logger *BufferedLiveLogger) sendLogs(logchan chan *protocol.TimelineRecordFeedLinesWrapper, logfinished chan struct{}) {
	defer close(logfinished)
	for {
		lines, ok := <-logchan
		if !ok {
			return
		}
		// Batch follow-up lines for at most one second, measured from
		// when the pending wrapper started accumulating.
		batchStart := time.Now()
		lastRecv := batchStart
		for {
			flush := false
			elapsed := lastRecv.Sub(batchStart)
			if elapsed > time.Second {
				break
			}
			select {
			case line, ok := <-logchan:
				if ok {
					if line.StepID == lines.StepID {
						// Same step: merge into the pending wrapper.
						lines.Count += line.Count
						lines.Value = append(lines.Value, line.Value...)
					} else {
						// Different step: flush what we have and start
						// a fresh batching window for the new wrapper.
						_ = logger.LiveLogger.SendLog(lines)
						lines = line
						batchStart = time.Now()
					}
				} else {
					// Channel closed: flush below; the outer receive
					// will observe the close and return.
					flush = true
				}
			case <-time.After(time.Second - elapsed):
				flush = true
			}
			if flush {
				break
			}
			lastRecv = time.Now()
		}
		_ = logger.LiveLogger.SendLog(lines)
	}
}
// Close stops the background sender (if one was started) and waits for
// it to drain all buffered lines. Safe to call when never started.
func (logger *BufferedLiveLogger) Close() error {
	pending := logger.logchan
	if pending == nil {
		return nil
	}
	logger.logchan = nil
	close(pending)
	<-logger.logfinished
	return nil
}
// SendLog enqueues a wrapper for the background sender, starting the
// sender goroutine lazily on first use. Always reports success; actual
// delivery happens asynchronously in sendLogs.
func (logger *BufferedLiveLogger) SendLog(wrapper *protocol.TimelineRecordFeedLinesWrapper) error {
	if logger.logchan == nil {
		pending := make(chan *protocol.TimelineRecordFeedLinesWrapper, 64)
		done := make(chan struct{})
		logger.logchan = pending
		logger.logfinished = done
		go logger.sendLogs(pending, done)
	}
	logger.logchan <- wrapper
	return nil
}
// JobLogger owns the timeline records and log buffers for one job run,
// fanning console output out to the live logger and to the log upload
// endpoints (classic VSS or the newer Results service).
type JobLogger struct {
	JobRequest      *protocol.AgentJobRequestMessage
	Connection      *protocol.VssConnection
	TimelineRecords *protocol.TimelineRecordWrapper
	CurrentRecord   int64 // index of the active record in TimelineRecords.Value
	CurrentLine     int64 // next live-log line number (becomes 1-based on first Log)
	JobBuffer       bytes.Buffer // accumulated log of the whole job
	CurrentBuffer   bytes.Buffer // accumulated log of the active step
	linefeedregex   *regexp.Regexp // lazily compiled newline normalizer
	Logger          LiveLogger
	lineBuffer      []byte // partial line carried between Write calls
	IsResults       bool // true when uploading via the Results service
	ChangeId        int64 // monotonically increasing ChangeOrder for step updates
	CurrentJobLine  int64
	FirstBlock      bool // next step-log upload is the first block
	FirstJobBlock   bool // next job-log upload is the first block
	linesync        sync.Mutex // guards lineBuffer in Write
	loggersync      sync.Mutex // guards timeline and buffer state
}
// Write implements io.Writer: bytes are buffered until a newline
// arrives, then every complete line is handed to Log in one call. The
// trailing partial line (if any) is kept for the next Write. Never
// reports an error.
func (logger *JobLogger) Write(p []byte) (n int, err error) {
	logger.linesync.Lock()
	defer logger.linesync.Unlock()
	logger.lineBuffer = append(logger.lineBuffer, p...)
	last := bytes.LastIndexByte(logger.lineBuffer, '\n')
	if last >= 0 {
		logger.Log(string(logger.lineBuffer[:last]))
		logger.lineBuffer = logger.lineBuffer[last+1:]
	}
	return len(p), nil
}
// current returns the active timeline record, or nil when all records
// have been consumed. Caller must hold loggersync.
func (logger *JobLogger) current() *protocol.TimelineRecord {
	if logger.CurrentRecord >= logger.TimelineRecords.Count {
		return nil
	}
	return logger.TimelineRecords.Value[logger.CurrentRecord]
}
// Current is the thread-safe accessor for the active timeline record;
// it returns nil when all records have been consumed.
func (logger *JobLogger) Current() *protocol.TimelineRecord {
	logger.loggersync.Lock()
	defer logger.loggersync.Unlock()
	return logger.current()
}
// MoveNext finalizes the active record and starts the next one,
// returning it (nil at the end of the timeline).
func (logger *JobLogger) MoveNext() *protocol.TimelineRecord {
	return logger.MoveNextExt(true)
}
// MoveNextExt finalizes the active timeline record (uploading its
// buffered log as the final block) and advances to the next one. When
// startNextRecord is true the next record is started and returned;
// otherwise — or when the timeline is exhausted — the timeline state is
// pushed to the server and nil is returned.
func (logger *JobLogger) MoveNextExt(startNextRecord bool) *protocol.TimelineRecord {
	logger.loggersync.Lock()
	defer logger.loggersync.Unlock()
	cur := logger.current()
	if cur == nil {
		return nil
	}
	// Flush the step log as its final block before moving on.
	logger.uploadBlock(cur, true)
	logger.CurrentRecord++
	logger.CurrentBuffer.Reset()
	if c := logger.current(); c != nil && startNextRecord {
		c.Start()
		return c
	}
	// No further record was started: sync the timeline instead.
	_ = logger.update()
	return nil
}
// uploadBlock flushes CurrentBuffer for the given record. In Results
// mode blocks are streamed: any final block, plus intermediate blocks
// once the buffer exceeds 2 MiB. In classic VSS mode only the final
// block is uploaded, as one complete log file attached to the record.
// Caller must hold loggersync.
func (logger *JobLogger) uploadBlock(cur *protocol.TimelineRecord, finalBlock bool) {
	if finalBlock && logger.CurrentBuffer.Len() > 0 || logger.IsResults && (finalBlock || logger.CurrentBuffer.Len() > 2*1024*1024) {
		if logger.IsResults {
			rs := &results.ResultsService{
				Connection: logger.Connection,
			}
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
			defer cancel()
			rs.UploadResultsStepLogAsync(ctx, logger.JobRequest.Plan.PlanID, logger.JobRequest.JobID, cur.ID, &logger.CurrentBuffer, int64(logger.CurrentBuffer.Len()), logger.FirstBlock, finalBlock, logger.CurrentLine)
			logger.FirstBlock = false
			logger.CurrentBuffer.Reset()
		} else if finalBlock {
			// The log reference is only attached when the upload
			// succeeded; failures are silently dropped (best effort).
			if logid, err := logger.Connection.UploadLogFile(logger.JobRequest.Timeline.ID, logger.JobRequest, logger.CurrentBuffer.String()); err == nil {
				cur.Log = &protocol.TaskLogReference{ID: logid}
			}
		}
	}
}
// Finish uploads the final block of the job-level log. Call once after
// all records have been processed.
func (logger *JobLogger) Finish() {
	logger.loggersync.Lock()
	defer logger.loggersync.Unlock()
	logger.uploadJobBlob(true)
}
// uploadJobBlob flushes JobBuffer, the whole-job log, mirroring
// uploadBlock's streaming rules (Results mode: final blocks and 2 MiB
// intermediate blocks; VSS mode: one final log file attached to record
// 0, which the code treats as the job-level record). Requires at least
// one timeline record. Caller must hold loggersync.
func (logger *JobLogger) uploadJobBlob(finalBlock bool) {
	if (finalBlock && logger.JobBuffer.Len() > 0 || logger.IsResults && (finalBlock || logger.JobBuffer.Len() > 2*1024*1024)) && len(logger.TimelineRecords.Value) > 0 {
		if logger.IsResults {
			rs := &results.ResultsService{
				Connection: logger.Connection,
			}
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
			defer cancel()
			rs.UploadResultsJobLogAsync(ctx, logger.JobRequest.Plan.PlanID, logger.JobRequest.JobID, &logger.JobBuffer, int64(logger.JobBuffer.Len()), logger.FirstJobBlock, finalBlock, logger.CurrentJobLine)
			logger.FirstJobBlock = false
			logger.JobBuffer.Reset()
		} else if finalBlock {
			if logid, err := logger.Connection.UploadLogFile(logger.JobRequest.Timeline.ID, logger.JobRequest, logger.JobBuffer.String()); err == nil {
				logger.TimelineRecords.Value[0].Log = &protocol.TaskLogReference{ID: logid}
				_ = logger.update()
			}
		}
	}
}
// Update is the thread-safe wrapper around update, pushing the current
// timeline state to the server.
func (logger *JobLogger) Update() error {
	logger.loggersync.Lock()
	defer logger.loggersync.Unlock()
	return logger.update()
}
// update pushes the timeline to the server. In Results mode every
// record after index 0 (the job-level record) is converted into a step
// update carrying a monotonically increasing ChangeOrder; otherwise the
// classic VSS timeline is updated. Caller must hold loggersync.
func (logger *JobLogger) update() error {
	if logger.IsResults {
		logger.ChangeId++
		updatereq := &results.StepsUpdateRequest{}
		updatereq.ChangeOrder = logger.ChangeId
		updatereq.WorkflowRunBackendID = logger.JobRequest.Plan.PlanID
		updatereq.WorkflowJobRunBackendID = logger.TimelineRecords.Value[0].ID
		updatereq.Steps = make([]results.Step, len(logger.TimelineRecords.Value)-1)
		for i, rec := range logger.TimelineRecords.Value[1:] {
			updatereq.Steps[i] = results.ConvertTimelineRecordToStep(*rec)
		}
		rs := &results.ResultsService{
			Connection: logger.Connection,
		}
		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
		defer cancel()
		return rs.UpdateWorkflowStepsAsync(ctx, updatereq)
	}
	return logger.Connection.UpdateTimeLine(logger.JobRequest.Timeline.ID, logger.JobRequest, logger.TimelineRecords)
}
// Append adds record to the end of the timeline, assigning it the next
// Order value after the current last record, and returns a pointer to
// the stored copy.
func (logger *JobLogger) Append(record protocol.TimelineRecord) *protocol.TimelineRecord {
	logger.loggersync.Lock()
	defer logger.loggersync.Unlock()
	values := logger.TimelineRecords.Value
	if n := len(values); n > 0 {
		record.Order = values[n-1].Order + 1
	}
	logger.TimelineRecords.Value = append(values, &record)
	logger.TimelineRecords.Count = int64(len(logger.TimelineRecords.Value))
	return &record
}
// Insert places record into the timeline just before the current
// record and returns a pointer to the stored copy. Note: Order is not
// adjusted here, unlike Append.
//
// Improvement: the original built the result with three chained appends
// on a zero-capacity slice; this builds one correctly pre-sized slice
// instead (same result, fewer allocations, no aliasing risk).
func (logger *JobLogger) Insert(record protocol.TimelineRecord) *protocol.TimelineRecord {
	logger.loggersync.Lock()
	defer logger.loggersync.Unlock()
	old := logger.TimelineRecords.Value
	updated := make([]*protocol.TimelineRecord, 0, len(old)+1)
	updated = append(updated, old[:logger.CurrentRecord]...)
	updated = append(updated, &record)
	updated = append(updated, old[logger.CurrentRecord:]...)
	logger.TimelineRecords.Value = updated
	logger.TimelineRecords.Count = int64(len(updated))
	return &record
}
// Log records one or more console lines: it normalizes line endings,
// appends them to the job and step buffers, streams them to the live
// logger with a per-step start-line counter, and triggers non-final
// block uploads. Lines before the first record starts are only kept in
// the job buffer.
func (logger *JobLogger) Log(lines string) {
	logger.loggersync.Lock()
	defer logger.loggersync.Unlock()
	// Lazily compile the newline normalizer once per JobLogger.
	if logger.linefeedregex == nil {
		logger.linefeedregex = regexp.MustCompile(`(\r\n|\r|\n)`)
	}
	// First line ever logged: initialize the line counter and mark the
	// next uploads as first blocks.
	if logger.CurrentLine == 0 {
		logger.CurrentLine = 1
		logger.FirstBlock = true
		logger.FirstJobBlock = true
	}
	// Strip one trailing CRLF and collapse all line endings to "\n".
	lines = logger.linefeedregex.ReplaceAllString(strings.TrimSuffix(lines, "\r\n"), "\n")
	_, _ = logger.JobBuffer.WriteString(lines + "\n")
	cur := logger.current()
	if cur == nil {
		return
	}
	_, _ = logger.CurrentBuffer.WriteString(lines + "\n")
	cline := logger.CurrentLine
	wrapper := &protocol.TimelineRecordFeedLinesWrapper{
		StartLine: &cline,
		Value:     strings.Split(lines, "\n"),
		StepID:    cur.ID,
	}
	wrapper.Count = int64(len(wrapper.Value))
	logger.CurrentLine += wrapper.Count
	logger.CurrentJobLine += wrapper.Count
	// Strip a leading ISO-8601 timestamp prefix (7 fractional digits,
	// e.g. "2021-04-02T15:50:14.6619714Z ") from live-feed lines.
	// NOTE(review): this regexp is recompiled on every call — hoisting
	// it to a package-level var would avoid per-line compile cost.
	timeline := regexp.MustCompile("^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{7}Z ")
	length := len("2021-04-02T15:50:14.6619714Z ")
	for i := 0; i < len(wrapper.Value); i++ {
		if timeline.MatchString(wrapper.Value[i]) {
			wrapper.Value[i] = wrapper.Value[i][length:]
		}
	}
	// Live-feed delivery is best effort; the error is ignored.
	logger.Logger.SendLog(wrapper)
	logger.uploadBlock(cur, false)
	logger.uploadJobBlob(false)
}
This file can be used to test a minimal runner
package main
import (
"context"
"fmt"
"net/http"
"os"
"strings"
"sync"
"time"
"github.com/ChristopherHX/github-act-runner/actionsrunner"
"github.com/ChristopherHX/github-act-runner/protocol"
"github.com/ChristopherHX/github-act-runner/runnerconfiguration"
"github.com/google/go-github/v45/github"
"github.com/google/uuid"
)
// noSurvey answers every configuration prompt non-interactively with
// its default value (presumably satisfying the survey interface used by
// runnerconfiguration — confirm against that package).
type noSurvey struct {
}

// GetInput returns the provided default answer unchanged.
func (*noSurvey) GetInput(prompt string, def string) string {
	return def
}

// GetSelectInput returns the provided default option unchanged.
func (*noSurvey) GetSelectInput(prompt string, options []string, def string) string {
	return def
}

// GetMultiSelectInput selects nothing.
func (*noSurvey) GetMultiSelectInput(prompt string, options []string) []string {
	return []string{}
}
// JobRequest pairs the webhook payload of a queued workflow job with
// the worker arguments configured for its label set.
type JobRequest struct {
	Payload    *github.WorkflowJobEvent
	WorkerArgs []string
}
// workerData is the bookkeeping for one ephemeral runner created for a
// specific queued job: the expected job, the channel on which the job
// that actually arrived is reported, and the listener's cancel handle.
// Settings stays nil until runner registration completes.
type workerData struct {
	JobID              int64
	WorkerName         string
	ActualJobRequest   chan *JobRequest
	ExpectedJobRequest *JobRequest
	cancelListener     context.CancelFunc
	Settings           *runnerconfiguration.RunnerSettings
}
// TokenCache caches per-registration-URL credentials so repeated runner
// setup/removal avoids redundant token exchanges. Lock serializes the
// token refresh paths; always pass *TokenCache (contains a Mutex).
type TokenCache struct {
	RunnerToken string
	GithubAuth  *protocol.GitHubAuthResult
	Lock        sync.Mutex
}
// GitHubEventMonitor is the webhook handler that scales runners per
// queued job. byJobID and byWorkerName index *workerData entries;
// tokenCache maps registration URLs to *TokenCache.
type GitHubEventMonitor struct {
	byJobID      sync.Map
	byWorkerName sync.Map
	tokenCache   sync.Map
	Config       *Config
}
// InMemoryRunner is an actionsrunner worker that keeps no on-disk state
// and logs to the console via the embedded ConsoleLogger.
type InMemoryRunner struct {
	actionsrunner.ConsoleLogger
	Data   *workerData
	Config *Config
}
// WriteJson is a no-op: this runner never persists state.
func (arunner *InMemoryRunner) WriteJson(path string, value interface{}) error {
	return nil
}

// ReadJson reports os.ErrNotExist so callers treat persisted state as
// absent.
func (arunner *InMemoryRunner) ReadJson(path string, value interface{}) error {
	return os.ErrNotExist
}

// Remove is the no-op counterpart to WriteJson.
func (arunner *InMemoryRunner) Remove(fname string) error {
	return nil
}
// MegaScalerContextData carries a job's labels and id. NOTE(review):
// nothing in this snippet references it — it may be used by code not
// shown here, or be leftover from the original megascaler source.
type MegaScalerContextData struct {
	Labels []string
	JobID  string
}
// ExecWorker is the wasm demo job executor: instead of running real
// steps it logs one timestamped line, completes the current timeline
// record and the job-level record (index 0), pushes the timeline, and
// finishes the job with no outputs. Errors from the logger calls are
// ignored — this is best-effort demo code.
func (arunner *InMemoryRunner) ExecWorker(run *actionsrunner.RunRunner, wc actionsrunner.WorkerContext, jobreq *protocol.AgentJobRequestMessage, src []byte) error {
	wc.Logger().Log(fmt.Sprintf("%vHello wasm", time.Now().UTC().Format("2006-01-02T15:04:05.0000000Z ")))
	wc.Logger().Current().Complete("Succeeded")
	wc.Logger().MoveNext()
	// Record 0 is the job-level timeline record.
	wc.Logger().TimelineRecords.Value[0].Complete("Succeeded")
	wc.Logger().Update()
	wc.FinishJob("Succeeded", &map[string]protocol.VariableValue{})
	return nil
}
// equalFoldSubSet reports whether every element of left has a
// case-insensitive match in right (i.e. left is a subset of right under
// strings.EqualFold). An empty or nil left is always a subset.
func equalFoldSubSet(left []string, right []string) bool {
outer:
	for _, l := range left {
		for _, r := range right {
			if strings.EqualFold(l, r) {
				continue outer
			}
		}
		return false
	}
	return true
}
// equalFoldSet reports whether left and right contain the same labels,
// compared case-insensitively: equal lengths and mutual subset
// containment.
func equalFoldSet(left []string, right []string) bool {
	return len(left) == len(right) &&
		equalFoldSubSet(left, right) &&
		equalFoldSubSet(right, left)
}
// ServeHTTP handles GitHub webhook deliveries. workflow_job "queued"
// events spawn a fresh ephemeral runner; "in_progress" events verify
// the job landed on the runner created for it and recover otherwise;
// any other workflow_job action tears the bookkeeping down. Invalid
// payloads are silently dropped (still answered 200 by net/http).
//
// Fix: the original dereferenced `expected.JobID` without a nil check —
// when the runner name was not found in byWorkerName but the job id was
// in byJobID, that panicked. A nil expected is now treated as an
// unexpected assignment whose runner is simply removed.
func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	payload, err := github.ValidatePayload(r, []byte(s.Config.Secret))
	if err != nil {
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		return
	}
	switch event := event.(type) {
	case *github.WorkflowJobEvent:
		if strings.EqualFold(event.GetAction(), "queued") {
			configureRunner(event, s, nil)
		} else if strings.EqualFold(event.GetAction(), "in_progress") {
			fmt.Println("runner in progress")
			job := event.GetWorkflowJob()
			name := job.GetRunnerName()
			var expected *workerData
			var got *workerData
			if re, ok := s.byWorkerName.LoadAndDelete(name); ok {
				expected = re.(*workerData)
				fmt.Println("runner matched")
			} else {
				fmt.Printf("runner not found %v\n", name)
			}
			if re, ok := s.byJobID.LoadAndDelete(job.GetID()); ok {
				got = re.(*workerData)
				switch {
				case expected != nil && expected.JobID == got.JobID:
					fmt.Println("job matched")
				case expected != nil:
					fmt.Println("assigned unexpected job")
					if !equalFoldSet(expected.ExpectedJobRequest.Payload.WorkflowJob.Labels, got.ExpectedJobRequest.Payload.WorkflowJob.Labels) {
						if equalFoldSubSet(got.ExpectedJobRequest.Payload.WorkflowJob.Labels, expected.ExpectedJobRequest.Payload.WorkflowJob.Labels) {
							fmt.Println("unexpected job has less labels than expected job, recreate runner")
							go configureRunner(expected.ExpectedJobRequest.Payload, s, expected)
						}
					}
					go s.remove(got)
					//got.cancelListener()
					//expected.cancelListener()
				default:
					// expected is nil: unknown runner picked up a known
					// job; all we can do is clean up that runner.
					fmt.Println("assigned unexpected job")
					go s.remove(got)
				}
			} else {
				fmt.Printf("job not found %v\n", name)
			}
			if expected != nil {
				if got != nil {
					expected.ActualJobRequest <- got.ExpectedJobRequest
				} else {
					expected.ActualJobRequest <- nil
					// unknown job with less labels hijacked the job
					go configureRunner(expected.ExpectedJobRequest.Payload, s, expected)
				}
			}
		} else {
			job := event.GetWorkflowJob()
			s.byWorkerName.Delete(job.GetRunnerName())
			if re, ok := s.byJobID.LoadAndDelete(job.GetID()); ok {
				e := re.(*workerData)
				s.remove(e)
			}
		}
	}
	w.WriteHeader(200)
}
// remove unregisters the ephemeral runner described by got, trying auth
// mechanisms in order: a cached short-lived GitHub auth result, then
// the cached runner registration token, then the configured PAT.
// Matching on error message text ("Http DELETE Request finished 401/
// 403") detects expired credentials — fragile, but the only signal the
// underlying package exposes here.
func (s *GitHubEventMonitor) remove(got *workerData) {
	rr := &runnerconfiguration.RemoveRunner{}
	rr.Pat = s.Config.Pat
	// Registration may still be in flight; give it 30s to finish and
	// populate Settings.
	if got.Settings == nil {
		<-time.After(time.Second * 30)
	}
	if got.Settings != nil {
		fmt.Printf("Removing runner %v\n", got.Settings.Instances[0].Agent.Name)
		rtc, _ := s.tokenCache.LoadOrStore(got.Settings.Instances[0].RegistrationURL, &TokenCache{})
		tc := rtc.(*TokenCache)
		var err error
		// First attempt: reuse the cached short-lived auth result.
		auth := tc.GithubAuth
		if auth != nil {
			_, err = rr.Remove(got.Settings, &noSurvey{}, auth)
			if err == nil {
				fmt.Printf("removed with short lived token\n")
			}
		} else {
			err = fmt.Errorf("no cache")
		}
		if err != nil && (err.Error() == "no cache" || strings.Contains(err.Error(), "Http DELETE Request finished 401") || strings.Contains(err.Error(), "Http DELETE Request finished 403")) {
			func() {
				tc.Lock.Lock()
				defer tc.Lock.Unlock()
				// Second attempt: the cached runner registration token.
				rr.Token = tc.RunnerToken
				_, err = rr.Remove(got.Settings, &noSurvey{}, nil)
				if err != nil && (strings.Contains(err.Error(), "Http DELETE Request finished 401") || strings.Contains(err.Error(), "Http DELETE Request finished 403")) {
					// Final attempt: fall back to the PAT (empty Token).
					rr.Token = ""
					_, err = rr.Remove(got.Settings, &noSurvey{}, nil)
					if err != nil {
						fmt.Printf("failed to remove runner: %v\n", err)
					} else {
						fmt.Printf("removed with PAT token\n")
					}
				} else if err != nil {
					fmt.Printf("failed to remove runner: %v\n", err)
				} else {
					fmt.Printf("removed with runner token\n")
				}
			}()
		} else if err != nil {
			fmt.Printf("failed to remove runner: %v\n", err)
		}
		if err == nil {
			s.byWorkerName.Delete(got.WorkerName)
		}
	} else {
		fmt.Println("can't remove runner, settings not set")
	}
}
// configureRunner registers a fresh ephemeral runner (random UUID name,
// the job's exact labels) for a queued workflow job and starts its
// listener. previousData, when non-nil, carries over the ActualJobRequest
// channel from a runner that is being recreated. Credentials are
// resolved through the per-URL TokenCache: cached short-lived auth
// first, then the cached runner token, then the PAT.
func configureRunner(event *github.WorkflowJobEvent, s *GitHubEventMonitor, previousData *workerData) {
	var err error
	job := event.GetWorkflowJob()
	conf := &runnerconfiguration.ConfigureRunner{}
	conf.Unattended = true
	conf.Labels = job.Labels
	conf.Name = uuid.NewString()
	// Only labels with a configured worker are served.
	worker := s.Config.GetByLabels(conf.Labels)
	if worker == nil {
		return
	}
	fmt.Println("config runner")
	wd := &workerData{
		JobID:      job.GetID(),
		WorkerName: conf.Name,
		ExpectedJobRequest: &JobRequest{
			Payload:    event,
			WorkerArgs: worker.Args,
		},
	}
	if previousData != nil {
		wd.ActualJobRequest = previousData.ActualJobRequest
	} else {
		wd.ActualJobRequest = make(chan *JobRequest)
	}
	var listenerctx context.Context
	listenerctx, wd.cancelListener = context.WithCancel(context.Background())
	s.byWorkerName.Store(conf.Name, wd)
	s.byJobID.Store(job.GetID(), wd)
	conf.NoDefaultLabels = true
	conf.URL = event.GetRepo().GetHTMLURL()
	conf.Pat = s.Config.Pat
	conf.Ephemeral = true
	rtc, _ := s.tokenCache.LoadOrStore(conf.URL, &TokenCache{})
	cl := &http.Client{}
	tc := rtc.(*TokenCache)
	var settings *runnerconfiguration.RunnerSettings
	// First attempt: reuse the cached short-lived auth result.
	auth := tc.GithubAuth
	if auth != nil {
		// err is intentionally shadowed here; failure just falls through.
		csettings, err := conf.Configure(&runnerconfiguration.RunnerSettings{}, &noSurvey{}, auth)
		if err == nil {
			fmt.Println("Successfully reused short lived auth")
			settings = csettings
		}
	}
	if settings == nil {
		func() {
			tc.Lock.Lock()
			defer tc.Lock.Unlock()
			// Second attempt: the cached runner registration token.
			if len(tc.RunnerToken) > 0 {
				conf.Token = tc.RunnerToken
				auth, err = conf.Authenicate(cl, &noSurvey{})
			} else {
				err = fmt.Errorf("not cached")
			}
			if err == nil {
				fmt.Println("Successfully reused runner token")
				tc.GithubAuth = auth
				settings, _ = conf.Configure(&runnerconfiguration.RunnerSettings{}, &noSurvey{}, auth)
			} else {
				// Final attempt: authenticate via the PAT, then cache
				// the runner token and auth result for reuse.
				conf.Token = ""
				auth, _ := conf.Authenicate(cl, &noSurvey{})
				tc.RunnerToken = conf.Token
				tc.GithubAuth = auth
				settings, _ = conf.Configure(&runnerconfiguration.RunnerSettings{}, &noSurvey{}, auth)
				fmt.Println("Used PAT token")
			}
		}()
	}
	wd.Settings = settings
	we := &InMemoryRunner{
		Data:   wd,
		Config: s.Config,
	}
	run := &actionsrunner.RunRunner{
		Settings: settings,
		Version:  "megascaler-v0.0.0",
	}
	go run.Run(we, listenerctx, context.Background())
}
// Worker maps a label set to the argument list used to launch that
// worker process.
type Worker struct {
	Labels []string `yaml:"labels"`
	Args   []string `yaml:"args"`
}
// Config is the YAML configuration of the scaler: known workers, the
// webhook secret, the GitHub PAT, a parallelism cap, and the HTTP
// listen address. NOTE(review): MaxParallel is declared but not
// enforced anywhere in this snippet.
type Config struct {
	Worker      []*Worker `yaml:"worker"`
	Secret      string    `yaml:"secret"`
	Pat         string    `yaml:"pat"`
	MaxParallel int       `yaml:"max_parallel"`
	Address     string    `yaml:"address"`
}
// GetByLabels returns the first configured worker whose label set
// matches labels case-insensitively, or nil when none matches.
func (config *Config) GetByLabels(labels []string) *Worker {
	for i := range config.Worker {
		if equalFoldSet(labels, config.Worker[i].Labels) {
			return config.Worker[i]
		}
	}
	return nil
}
// main registers a one-shot ("ephemeral") wasm-labelled runner against
// a repository and serves a single job with the in-memory worker.
// Replace the PAT and repository URL placeholders before running.
func main() {
	// The commented block is the original webhook-driven megascaler
	// entry point this sample was derived from.
	// src, _ := os.ReadFile("config.yml")
	// conf := &Config{}
	// yaml.Unmarshal(src, conf)
	// http.ListenAndServe(conf.Address, &GitHubEventMonitor{
	// Config: conf,
	// })
	conf := &runnerconfiguration.ConfigureRunner{}
	conf.Unattended = true
	conf.Labels = []string{"wasm"}
	conf.Name = uuid.NewString()
	conf.Pat = "<your pat>"
	conf.URL = "<repo url>"
	conf.Trace = true
	conf.Ephemeral = true
	settings, err := conf.Configure(&runnerconfiguration.RunnerSettings{}, &noSurvey{}, nil)
	if err != nil {
		// NOTE(review): execution continues with possibly-nil settings
		// after a configuration failure — confirm Run tolerates that.
		fmt.Println(err.Error())
	}
	we := &InMemoryRunner{
		Data:   nil,
		Config: nil,
	}
	run := &actionsrunner.RunRunner{
		Settings: settings,
		Version:  "megascaler-v0.0.0",
	}
	err = run.Run(we, context.Background(), context.Background())
	if err != nil {
		fmt.Println(err.Error())
	}
}
(The main.go file is an edit of https://github.com/ChristopherHX/megascaler) |
Beta Was this translation helpful? Give feedback.
All reactions
-
Beta Was this translation helpful? Give feedback.
All reactions
-
🎉 1
-
|
The files to run wasm with node can be found here:
|
Beta Was this translation helpful? Give feedback.

Uh oh!
There was an error while loading. Please reload this page.
-
Just wanted to get thoughts on this idea. For non-container jobs I was thinking it might be possible to deploy the runner compiled to a wasm target; it might need an embedded JS runtime, and Deno looks promising.
References:
Beta Was this translation helpful? Give feedback.
All reactions