M src/cmd/builtins/unpack.go => src/cmd/builtins/unpack.go +8 -0
@@ 103,6 103,7 @@ func unpackMain() {
dest = flag.String("dest", "./", "Unpack destination.")
archiveType = flag.String("type", "", "Archive type, one of 'tar', 'tar.gz', 'tar.bz2', 'tar.xz', (default guessed from file extension.)")
noUnwrap = flag.Bool("no-unwrap", false, "Don't unwrap single top level directories.")
+ keep = flag.Bool("keep", false, "Don't remove the archive after unpacking it.")
)
flag.Parse()
@@ 156,4 157,11 @@ func unpackMain() {
default:
die("Don't know how to unpack archive type '%s'\n", *archiveType)
}
+
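+ // Unless --keep was passed, remove the archive now that it has been unpacked.
+ // Archives read from stdin have nothing on disk to remove.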
+ if inf != os.Stdin && !*keep {
+ err = os.Remove(inf.Name())
+ if err != nil {
+ die("Error removing ")
+ }
+ }
}
M src/cmd/hermes/build.go => src/cmd/hermes/build.go +163 -152
@@ 20,12 20,10 @@ import (
"sync/atomic"
"time"
- "github.com/andrewchambers/hermes/extrasqlite"
"github.com/andrewchambers/hermes/hscript/hscript"
"github.com/andrewchambers/hermes/pkgs"
"github.com/andrewchambers/hermes/pkgs/interp"
"github.com/andrewchambers/hermes/proctools"
- "github.com/bvinc/go-sqlite-lite/sqlite3"
"github.com/cenkalti/backoff"
"github.com/kballard/go-shellquote"
"github.com/pkg/errors"
@@ 44,10 42,10 @@ type pkgBuildOpts struct {
type buildState struct {
tmpDir string
fetchSocketPath string
- mirrorDBPath string
- mirrorDB *sqlite3.Conn
+ mirrors *interp.MirrorMap
nextGCRootPath func() string
nextPkgEdnPath func() string
+ nextMirrorsEdnPath func() string
nextSignalSocketPath func() string
}
@@ 143,28 141,52 @@ func startBackgroundCommandInErrGroup(ctx context.Context, g *errgroup.Group, cm
// The reason fetch is an external process is so it can reliably
// be cancelled.
func startFetchurlServer(ctx context.Context, g *errgroup.Group, cfg *Config, state *buildState) (cleanup func(), err error) {
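+ // Build up cleanup incrementally; if we bail out with an error, the deferred
+ // call below undoes whatever has been set up so far.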
+ cleanup = func() {}
+ defer func() {
+ if err != nil {
+ cleanup()
+ }
+ }()
self, err := os.Executable()
if err != nil {
- return nil, errors.Wrap(err, "unable to determine path to hermes binary")
+ return cleanup, errors.Wrap(err, "unable to determine path to hermes binary")
}
- contentMirror := ""
+ mirrorsFile, err := os.Create(state.nextMirrorsEdnPath())
+ if err != nil {
+ return cleanup, errors.Wrap(err, "unable to create mirror database")
+ }
+ cleanup = wrapCleanup(cleanup, func() {
+ _ = os.Remove(mirrorsFile.Name())
+ })
+
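+ // Snapshot the in-memory mirror map into an EDN file for the fetchurl server to read.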
+ var marshaled []byte
- if cfg.ContentMirror != nil {
- contentMirror = cfg.ContentMirror.String()
+ state.mirrors.QueryMap(func(m map[string][]string) {
+ marshaled, err = edn.Marshal(m)
+ if err != nil {
+ panic(err)
+ }
+ })
+
+ _, err = mirrorsFile.Write(marshaled)
+ if err != nil {
+ return cleanup, err
+ }
+ err = mirrorsFile.Close()
+ if err != nil {
+ return cleanup, err
}
opts := struct {
- Socket string
- Mirrors string
- ContentMirror string `edn:"content-mirror"`
- WorkDir string `edn:"work-dir"`
+ Socket string
+ Mirrors string
+ WorkDir string `edn:"work-dir"`
}{
- Socket: state.fetchSocketPath,
- Mirrors: state.mirrorDBPath,
- ContentMirror: contentMirror,
- WorkDir: state.tmpDir,
+ Socket: state.fetchSocketPath,
+ Mirrors: mirrorsFile.Name(),
+ WorkDir: state.tmpDir,
}
// We pass options to the fetchurl server via stdin
@@ 172,7 194,7 @@ func startFetchurlServer(ctx context.Context, g *errgroup.Group, cfg *Config, st
// the command line.
optsEdn, err := edn.Marshal(opts)
if err != nil {
- return nil, err
+ return cleanup, err
}
args := []string{
@@ 183,7 205,13 @@ func startFetchurlServer(ctx context.Context, g *errgroup.Group, cfg *Config, st
cmd := exec.Command(self, args...)
cmd.Stdin = bytes.NewReader(optsEdn)
cmd.Stderr = os.Stderr
- return startBackgroundCommandInErrGroup(ctx, g, cmd)
+ stopServer, err := startBackgroundCommandInErrGroup(ctx, g, cmd)
+ if err != nil {
+ return cleanup, err
+ }
+
+ cleanup = wrapCleanup(cleanup, stopServer)
+ return cleanup, nil
}
func startRemoteMasterConnection(ctx context.Context, g *errgroup.Group, remoteState *remoteBuildState) (cleanup func(), err error) {
@@ 306,11 334,17 @@ func createRemoteTempdir(ctx context.Context, g *errgroup.Group, remoteState *re
return strings.TrimSpace(tmpDir), cleanup, nil
}
-func buildPackageOnRemote(ctx context.Context, remote *url.URL, extraSSHFlags []string, state *buildState, cfg *Config, pkgBuildOpts *pkgBuildOpts, pkg *pkgs.Package, link string) (string, error) {
+func buildPackageOnRemote(ctx context.Context, remote *url.URL, extraSSHFlags []string, state *buildState, cfg *Config, pkgBuildOpts *pkgBuildOpts, pkg *pkgs.Package, link string) (pkgPath string, err error) {
remoteBuildGroup, ctx := errgroup.WithContext(ctx)
defer remoteBuildGroup.Wait()
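+ // Start a local fetchurl server for this remote build as well.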
+ stopFetchurlServer, err := startFetchurlServer(ctx, remoteBuildGroup, cfg, state)
+ if err != nil {
+ return "", err
+ }
+ defer stopFetchurlServer()
+
remoteState := &remoteBuildState{
remote: remote,
localSSHControlPath: filepath.Join(state.tmpDir, "sshctl"),
@@ 334,11 368,11 @@ func buildPackageOnRemote(ctx context.Context, remote *url.URL, extraSSHFlags []
remoteNormalizedPackagePath := filepath.Join(remoteState.remoteTmpDir, "pkg.edn")
remoteOutLink := filepath.Join(remoteState.remoteTmpDir, "result")
- stopFetchurlServer, err := startRemoteFetchurlServer(ctx, remoteBuildGroup, remoteState, state)
+ stopRemoteFetchurlServer, err := startRemoteFetchurlServer(ctx, remoteBuildGroup, remoteState, state)
if err != nil {
return "", err
}
- defer stopFetchurlServer()
+ defer stopRemoteFetchurlServer()
normalizedPackageEdn, err := edn.Marshal(pkg)
if err != nil {
@@ 547,107 581,125 @@ func buildPackage(ctx context.Context, state *buildState, cfg *Config, pkgBuildO
var packagePath string
- pkgstoreErrGroup, pkgStoreCtx := errgroup.WithContext(ctx)
-
- for i := uint(0); i < pkgBuildOpts.Parallel; i++ {
- // Closure forces a copy of i, instead of capturing a reference.
- func(ctx context.Context, i uint) {
- pkgstoreErrGroup.Go(func() error {
+ buildErrGroup, ctx := errgroup.WithContext(ctx)
- buildErrGroup, ctx := errgroup.WithContext(ctx)
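+ // Each package build now starts its own fetchurl server instead of sharing one started in buildMain.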
+ stopFetchurlServer, err := startFetchurlServer(ctx, buildErrGroup, cfg, state)
+ if err != nil {
+ return "", err
+ }
+ defer stopFetchurlServer()
- signalSocketPath := state.nextSignalSocketPath()
- signalSocketListener, err := net.Listen("unix", signalSocketPath)
- if err != nil {
- return errors.Wrap(err, "unable to create signal forwarding socket")
- }
+ buildErrGroup.Go(func() error {
+ pkgstoreErrGroup, pkgStoreCtx := errgroup.WithContext(ctx)
- var output bytes.Buffer
+ for i := uint(0); i < pkgBuildOpts.Parallel; i++ {
+ // Closure forces a copy of i, instead of capturing a reference.
+ func(ctx context.Context, i uint) {
+ pkgstoreErrGroup.Go(func() error {
- exited := make(chan struct{}, 1)
+ buildErrGroup, ctx := errgroup.WithContext(ctx)
- buildErrGroup.Go(func() error {
- defer signalSocketListener.Close()
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-exited:
- return nil
+ signalSocketPath := state.nextSignalSocketPath()
+ signalSocketListener, err := net.Listen("unix", signalSocketPath)
+ if err != nil {
+ return errors.Wrap(err, "unable to create signal forwarding socket")
}
- })
- buildErrGroup.Go(func() error {
- defer close(exited)
+ var output bytes.Buffer
- args := []string{
- "build",
- "--store", cfg.StorePath,
- "--normalized-package", pkgFile.Name(),
- "--fetch-socket", state.fetchSocketPath,
- "--signal-socket", signalSocketPath,
- }
+ exited := make(chan struct{}, 1)
- if pkgBuildOpts.InteractiveDebug {
- if pkgBuildOpts.Parallel != 1 {
- panic("BUG")
+ buildErrGroup.Go(func() error {
+ defer signalSocketListener.Close()
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-exited:
+ return nil
}
- args = append(args, "--interactive-debug")
- }
+ })
- if i == 0 {
- args = append(args, "--link", link)
- }
+ buildErrGroup.Go(func() error {
+ defer close(exited)
- cmd := exec.Command(cfg.PkgStoreBin, args...)
- if i == 0 {
- cmd.Stdout = &output
- }
- if pkgBuildOpts.InteractiveDebug {
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- }
- cmd.Stderr = os.Stderr
- return proctools.RunCmd(ctx, cmd, func() {})
- })
+ args := []string{
+ "build",
+ "--store", cfg.StorePath,
+ "--normalized-package", pkgFile.Name(),
+ "--fetch-socket", state.fetchSocketPath,
+ "--signal-socket", signalSocketPath,
+ }
+
+ if pkgBuildOpts.InteractiveDebug {
+ if pkgBuildOpts.Parallel != 1 {
+ panic("BUG")
+ }
+ args = append(args, "--interactive-debug")
+ }
+
+ if i == 0 {
+ args = append(args, "--link", link)
+ }
+
+ cmd := exec.Command(cfg.PkgStoreBin, args...)
+ if i == 0 {
+ cmd.Stdout = &output
+ }
+ if pkgBuildOpts.InteractiveDebug {
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ }
+ cmd.Stderr = os.Stderr
+ return proctools.RunCmd(ctx, cmd, func() {})
+ })
+
+ buildErrGroup.Go(func() error {
+ c, err := signalSocketListener.Accept()
+ if err != nil {
+ select {
+ case <-exited:
+ return nil
+ default:
+ return err
+ }
+ }
+ defer c.Close()
- buildErrGroup.Go(func() error {
- c, err := signalSocketListener.Accept()
- if err != nil {
select {
+ case <-ctx.Done():
+ // Write the close signal via stdin so we can signal
+ // even when it is a setuid binary.
+ _, _ = c.Write([]byte("Term\n"))
+ return nil
case <-exited:
return nil
- default:
- return err
}
+ })
+
+ err = buildErrGroup.Wait()
+ if err != nil {
+ return err
}
- defer c.Close()
- select {
- case <-ctx.Done():
- // Write the close signal via stdin so we can signal
- // even when it is a setuid binary.
- _, _ = c.Write([]byte("Term\n"))
- return nil
- case <-exited:
- return nil
+ if i == 0 {
+ packagePath = strings.Trim(output.String(), "\n")
}
- })
- err = buildErrGroup.Wait()
- if err != nil {
- return err
- }
+ return nil
+ })
+ }(pkgStoreCtx, i)
+ }
- if i == 0 {
- packagePath = strings.Trim(output.String(), "\n")
- }
+ err = pkgstoreErrGroup.Wait()
+ if err != nil {
+ return err
+ }
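+ // Stop the fetchurl server now that all builds are done so the outer buildErrGroup (which it runs in) can finish.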
+ stopFetchurlServer()
- return nil
- })
- }(pkgStoreCtx, i)
- }
+ return nil
+ })
- err = pkgstoreErrGroup.Wait()
+ err = buildErrGroup.Wait()
if err != nil {
return "", err
}
@@ 658,13 710,6 @@ func buildPackage(ctx context.Context, state *buildState, cfg *Config, pkgBuildO
func loadModule(ctx context.Context, cfg *Config, state *buildState, modUrl, expr string) (hscript.Value, error) {
var err error
- // We do the whole module load within a single sqlite transaction
- // so we only need a single fsync.
- err = state.mirrorDB.Begin()
- if err != nil {
- return nil, err
- }
-
resolveImportPath := func(workingDir string, importv hscript.Value) (string, *bytes.Buffer, error) {
var loadPath string
@@ 690,23 735,11 @@ func loadModule(ctx context.Context, cfg *Config, state *buildState, modUrl, exp
return "", nil, errors.Wrap(err, "error resolving import path")
}
- // Commit the mirror database transaction so that
- // the package build sees the updated mirror lists.
- err = state.mirrorDB.Commit()
- if err != nil {
- return "", nil, err
- }
-
pkgPath, err := buildPackage(ctx, state, cfg, &pkgBuildOpts{Parallel: 1}, pkg, state.nextGCRootPath())
if err != nil {
return "", nil, errors.Wrap(err, "error building import path")
}
- err = state.mirrorDB.Begin()
- if err != nil {
- return "", nil, err
- }
-
_, _ = buf.WriteString(pkgPath)
}
loadPath = buf.String()
@@ 765,18 798,13 @@ func loadModule(ctx context.Context, cfg *Config, state *buildState, modUrl, exp
exports := make(hscript.StringDict)
if modUrl != "" {
- exports, err = interp.ExecModule(cfg.StorePath, modUrl, resolveImportPath, state.mirrorDB)
+ exports, err = interp.ExecModule(cfg.StorePath, modUrl, resolveImportPath, state.mirrors)
if err != nil {
return nil, err
}
}
- v, err := interp.Eval(cfg.StorePath, exports, resolveImportPath, state.mirrorDB, expr)
- if err != nil {
- return nil, err
- }
-
- err = state.mirrorDB.Commit()
+ v, err := interp.Eval(cfg.StorePath, exports, resolveImportPath, state.mirrors, expr)
if err != nil {
return nil, err
}
@@ 840,28 868,13 @@ func buildMain() {
die("Second interrupt, aborting build.\n")
}()
- buildErrGroup, ctx := errgroup.WithContext(ctx)
-
- buildErrGroup.Go(func() error {
+ err = func() error {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
return err
}
defer os.RemoveAll(tmpDir)
- mirrorDBPath := filepath.Join(tmpDir, "mirrors.db")
- mirrorDB, err := extrasqlite.Open(mirrorDBPath, nil)
- if err != nil {
- return err
- }
- defer mirrorDB.Close()
-
- // TODO, Mirror logic in own package.
- err = mirrorDB.Exec("CREATE Table Mirrors(Hash TEXT, Url TEXT, UNIQUE(Hash,Url));")
- if err != nil {
- return err
- }
-
// GC roots to keep recursive package defs alive
// for the duration of module loading.
gcRootCount := int64(0)
@@ 890,22 903,22 @@ func buildMain() {
return filepath.Join(tmpDir, fmt.Sprintf("signals-%d.socket", v))
}
+ mirrorsEdnCount := int64(0)
+ nextMirrorsEdnPath := func() string {
+ v := atomic.AddInt64(&mirrorsEdnCount, 1)
+ return filepath.Join(tmpDir, fmt.Sprintf("mirrors-%d.edn", v))
+ }
+
state := &buildState{
tmpDir: tmpDir,
fetchSocketPath: filepath.Join(tmpDir, "fetch.sock"),
- mirrorDBPath: mirrorDBPath,
- mirrorDB: mirrorDB,
+ mirrors: interp.MakeMirrorMap(),
nextGCRootPath: nextGCRootPath,
nextPkgEdnPath: nextPkgEdnPath,
+ nextMirrorsEdnPath: nextMirrorsEdnPath,
nextSignalSocketPath: nextSignalSocketPath,
}
- stopFetchurlServer, err := startFetchurlServer(ctx, buildErrGroup, cfg, state)
- if err != nil {
- return err
- }
- defer stopFetchurlServer()
-
pkgv, err := loadModule(ctx, cfg, state, modUrl, *expr)
if err != nil {
return err
@@ 952,7 965,7 @@ func buildMain() {
if *extraSSHFlags != "" {
parsedSSHFlags, err = shellquote.Split(*extraSSHFlags)
if err != nil {
- die("Error parsing --extra-ssh-flags: %s\n", err)
+ return errors.Wrap(err, "error parsing --extra-ssh-flags\n")
}
}
@@ 968,9 981,7 @@ func buildMain() {
}
return nil
- })
-
- err = buildErrGroup.Wait()
+ }()
if err != nil {
if evalError, ok := errors.Cause(err).(*hscript.EvalError); ok {
die("%s\n", evalError.Backtrace())
M src/cmd/hermes/fetchserver.go => src/cmd/hermes/fetchserver.go +2 -2
@@ 38,8 38,8 @@ func fetchServerMain() {
mirrorDB := make(map[string][]string)
- if *mirrors {
- mirrorF, err := os.Open(*mirrors)
+ if opts.Mirrors != "" {
+ mirrorF, err := os.Open(opts.Mirrors)
if err != nil {
die("Unable to open mirror database: %s\n", err)
}
M src/pkgs/interp/builtins.go => src/pkgs/interp/builtins.go +170 -48
@@ 22,6 22,7 @@ func init() {
// XXX we could lazily init so subcommands that don't need this don't pay for it.
BuiltinEnv = make(hscript.StringDict)
BuiltinEnv["fetch"] = hscript.NewBuiltin("fetch", fetchBuiltin)
+ BuiltinEnv["pkg_tree"] = hscript.NewBuiltin("pkg_tree", pkgTreeBuiltin)
BuiltinEnv["builtins"] = Placeholder{"builtins"}
BuiltinEnv["build_platform"] = &hscript.Module{
@@ 37,48 38,37 @@ func init() {
}
}
-func fetchBuiltin(thread *hscript.Thread, fn *hscript.Builtin, args hscript.Tuple, kwargs []hscript.Tuple) (hscript.Value, error) {
- mirrors := thread.Local("mirror_db").(*MirrorMap)
- currentDir := thread.Local("current_dir").(string)
+// XXX this is a badly factored function: it processes a url and does some disambiguation of args depending
+// on whether we are fetching a local file or a remote url. If we are fetching a local directory, it also sets
+// the fetch and unpack args.
+//
+// Untangling it would be a good exercise for anyone with time and some creativity.
+// Pay attention to performance too: fetch is on a performance-critical code path when
+// it is used for loading source code and patches.
+func processAndValidateFetchUrl(fn *hscript.Builtin, mirrors *MirrorMap, currentDir string, hash, fetchUrl, file_name, unpack_type *string, unpack, unpack_unwrap *bool) error {
+ var parsedUrl *url.URL
- fetchUrl := ""
- hash := ""
- // hscript naming convention.
- file_name := ""
- unpack := false
- unpack_unwrap := true
- unpack_type := ""
-
- err := hscript.UnpackArgs(fn.Name(), args, kwargs, "url?", &fetchUrl, "hash?", &hash, "file_name?", &file_name, "unpack?", &unpack, "unpack_unwrap?", &unpack_unwrap, "unpack_type?", &unpack_type)
- if err != nil {
- return nil, err
- }
-
- if strings.HasPrefix(fetchUrl, ".") || strings.HasPrefix(fetchUrl, "/") {
- fpath := fetchUrl
+ if strings.HasPrefix(*fetchUrl, ".") || strings.HasPrefix(*fetchUrl, "/") {
+ fpath := *fetchUrl
if !filepath.IsAbs(fpath) {
fpath = filepath.Join(currentDir, fpath)
}
- fpath, err = filepath.Abs(fpath)
+ fpath, err := filepath.Abs(fpath)
if err != nil {
- return nil, errors.Wrapf(err, "unable to get absolute path of %q", fpath)
+ return errors.Wrapf(err, "unable to get absolute path of %q", fpath)
}
st, err := os.Stat(fpath)
if err != nil {
- return nil, errors.Wrapf(err, "unable to stat %q", fpath)
+ return errors.Wrapf(err, "unable to stat %q", fpath)
}
- u := &url.URL{Scheme: "file", Path: fpath}
- fetchUrl = u.String()
+ parsedUrl = &url.URL{Scheme: "file", Path: fpath}
+ *fetchUrl = parsedUrl.String()
fdir := filepath.Dir(fpath)
- if file_name == "" {
- file_name = filepath.Base(fpath)
- }
-
if !strings.HasPrefix(fdir, currentDir) && !(st.IsDir() && fpath == currentDir) {
_, err := os.Stat(filepath.Join(fdir, ".hermes_allow"))
// If it's a dir, the allow file can be within the dir.
@@ 86,62 76,194 @@ func fetchBuiltin(thread *hscript.Thread, fn *hscript.Builtin, args hscript.Tupl
_, err = os.Stat(filepath.Join(fpath, ".hermes_allow"))
}
if err != nil {
- return nil, errors.Errorf("security violation: access of %q outside of current module directory and without file '.hermes_allow'", fpath)
+ return errors.Errorf("security violation: access of %q outside of current module directory and without file '.hermes_allow'", fpath)
}
}
if st.IsDir() {
- unpack_type = "tar"
- unpack_unwrap = false
- unpack = true
+ *unpack_type = "tar"
+ *unpack_unwrap = false
+ *unpack = true
}
- if hash == "" {
+ // If the user didn't specify the hash for a local file, we must hash the file ourselves.
+ // If they did specify the hash, we can skip a whole lot of pointless IO.
+ if *hash == "" {
h, err := hhash.New("sha256", "hex")
if err != nil {
- return nil, err
+ return err
}
if st.IsDir() {
err = dtar.MakeTar(fpath, func(string) bool { return true }, h)
if err != nil {
- return nil, err
+ return err
}
} else {
f, err := os.Open(fpath)
if err != nil {
- return nil, errors.Wrapf(err, "unable to open local file %s", fpath)
+ return errors.Wrapf(err, "unable to open local file %s", fpath)
}
defer f.Close()
_, err = io.Copy(h, f)
if err != nil {
- return nil, err
+ return err
}
}
- hash = h.Done()
+ *hash = h.Done()
}
- } else if fetchUrl != "" {
- u, err := url.Parse(fetchUrl)
+ } else if *fetchUrl != "" {
+ u, err := url.Parse(*fetchUrl)
if err != nil {
- return nil, errors.Wrapf(err, "unable to parse url: %q", fetchUrl)
+ return errors.Wrapf(err, "unable to parse url: %q", fetchUrl)
}
+ parsedUrl = u
+ }
- if file_name == "" {
- file_name = path.Base(u.Path)
- }
+ if *file_name == "" {
+ *file_name = path.Base(parsedUrl.Path)
}
- if hash != "" && fetchUrl != "" {
- err = mirrors.AddContentMirror(hash, fetchUrl)
+ if *hash == "" && *fetchUrl == "" {
+ return errors.Errorf("%s requires either a hash or a url", fn.Name())
+ }
+
+ if *hash != "" && *fetchUrl != "" {
+ err := mirrors.AddContentMirror(*hash, *fetchUrl)
if err != nil {
- return nil, err
+ return err
}
// We no longer need the url if we have an entry in the hash database.
- // This allows us to change urls without breaking package builds.
- fetchUrl = ""
+ // This allows us to change urls without breaking package builds because
+ // the url is not part of the package hash.
+ *fetchUrl = ""
+ }
+
+ return nil
+}
+
+func pkgTreeBuiltin(thread *hscript.Thread, fn *hscript.Builtin, args hscript.Tuple, kwargs []hscript.Tuple) (hscript.Value, error) {
+ mirrors := thread.Local("mirror_db").(*MirrorMap)
+ currentDir := thread.Local("current_dir").(string)
+
+ hash := ""
+
+ err := hscript.UnpackArgs(fn.Name(), args, kwargs, "hash", &hash)
+ if err != nil {
+ return nil, err
+ }
+
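+ // Build a runcmds script: a shebang into the builtins package followed by one
+ // fetch/unpack or symlink command per import, appended in the loop below.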
+ builder := hscript.NewList([]hscript.Value{
+ hscript.String("#!"),
+ Placeholder{"builtins"},
+ hscript.String("/runcmds\n"),
+ })
+
+ malformedArgError := func(idx int) error {
+ return errors.Errorf("argument %d should be of the form [\"importpath\", \"url\" | package]", idx)
+ }
+
+ for argIdx, imp := range args {
+ switch imp := imp.(type) {
+ case *hscript.List:
+ if imp.Len() != 2 {
+ return nil, malformedArgError(argIdx)
+ }
+
+ himportPathVal := imp.Index(0)
+ himportPathStr, ok := himportPathVal.(hscript.String)
+ if !ok {
+ return nil, malformedArgError(argIdx)
+ }
+ importPath := string(himportPathStr)
+ if strings.HasPrefix(importPath, "/") {
+ importPath = importPath[1:]
+ }
+
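+ // The second element is either a url string to fetch and unpack, or a package value to symlink into the tree.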
+ switch importContent := imp.Index(1).(type) {
+ case hscript.String:
+ // we are importing from a url string.
+ fetchUrl := string(importContent)
+ file_name := ""
+ unpack := true
+ unpack_unwrap := true
+ unpack_type := ""
+
+ err = processAndValidateFetchUrl(fn, mirrors, currentDir, &hash, &fetchUrl, &file_name, &unpack_type, &unpack, &unpack_unwrap)
+ if err != nil {
+ return nil, err
+ }
+
+ builder.Append(Placeholder{"builtins"})
+ builder.Append(
+ hscript.String(
+ fmt.Sprintf(
+ "/fetch %q %q\n",
+ "--url="+fetchUrl,
+ "--out="+file_name,
+ ),
+ ),
+ )
+ builder.Append(Placeholder{"builtins"})
+ builder.Append(
+ hscript.String(
+ fmt.Sprintf(
+ "/unpack %q %q\n",
+ "--dest=${out}/"+importPath,
+ file_name,
+ ),
+ ),
+ )
+
+ case *hscript.List, *hscript.Dict:
+ // We are importing from a package.
+ builder.Append(Placeholder{"builtins"})
+ builder.Append(hscript.String("/symlink \""))
+ builder.Append(importContent)
+ builder.Append(hscript.String(fmt.Sprintf("\" %q\n", "${out}/"+importPath)))
+ default:
+ return nil, malformedArgError(argIdx)
+ }
+ }
+ }
+
+ d := &hscript.Dict{}
+ d.SetKey(hscript.String("builder"), builder)
+ d.SetKey(hscript.String("content"), hscript.String(hash))
+ return d, nil
+}
+
+func fetchBuiltin(thread *hscript.Thread, fn *hscript.Builtin, args hscript.Tuple, kwargs []hscript.Tuple) (hscript.Value, error) {
+ mirrors := thread.Local("mirror_db").(*MirrorMap)
+ currentDir := thread.Local("current_dir").(string)
+
+ fetchUrl := ""
+ hash := ""
+ // hscript naming convention.
+ file_name := ""
+ unpack := false
+ unpack_unwrap := true
+ unpack_type := ""
+
+ err := hscript.UnpackArgs(
+ fn.Name(), args, kwargs,
+ "url?", &fetchUrl,
+ "hash?", &hash,
+ "file_name?", &file_name,
+ "unpack?", &unpack,
+ "unpack_unwrap?", &unpack_unwrap,
+ "unpack_type?", &unpack_type,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ err = processAndValidateFetchUrl(fn, mirrors, currentDir, &hash, &fetchUrl, &file_name, &unpack_type, &unpack, &unpack_unwrap)
+ if err != nil {
+ return nil, err
}
urlArg := ""
M src/pkgs/interp/interp.go => src/pkgs/interp/interp.go +4 -5
@@ 7,7 7,6 @@ import (
"path/filepath"
"github.com/andrewchambers/hermes/hscript/hscript"
- "github.com/bvinc/go-sqlite-lite/sqlite3"
"github.com/pkg/errors"
"olympos.io/encoding/edn"
)
@@ 57,7 56,7 @@ func addBuiltins(storePath string, e hscript.StringDict) hscript.StringDict {
// Given a working dir and a hscript value, returns its path/url and the bytes of the loaded module.
type ModuleResolver func(workingDir string, pkg hscript.Value) (string, *bytes.Buffer, error)
-func newStarlarkThread(mc *ModuleCache, storePath, dir string, resolveModule ModuleResolver, mirrors *sqlite3.Conn) *hscript.Thread {
+func newStarlarkThread(mc *ModuleCache, storePath, dir string, resolveModule ModuleResolver, mirrors *MirrorMap) *hscript.Thread {
thread := &hscript.Thread{Load: mc.Load}
// XXX TODO These can all just be one state struct.
thread.SetLocal("module_cache", mc)
@@ 73,7 72,7 @@ func (mc *ModuleCache) Load(thread *hscript.Thread, value hscript.Value) (string
storePath := thread.Local("store_path").(string)
currentDir := thread.Local("current_dir").(string)
resolveModule := thread.Local("resolve_module").(ModuleResolver)
- mirrors := thread.Local("mirror_db").(*sqlite3.Conn)
+ mirrors := thread.Local("mirror_db").(*MirrorMap)
modulePath, moduleBytes, err := resolveModule(currentDir, value)
if err != nil {
@@ 98,7 97,7 @@ func (mc *ModuleCache) Load(thread *hscript.Thread, value hscript.Value) (string
}
// ExecFile run a hermes module url with hscript hermes specific builtins and setup.
-func ExecModule(storePath, modPath string, resolveModule ModuleResolver, mirrors *sqlite3.Conn) (hscript.StringDict, error) {
+func ExecModule(storePath, modPath string, resolveModule ModuleResolver, mirrors *MirrorMap) (hscript.StringDict, error) {
mc := NewModuleCache()
currentDir, err := os.Getwd()
@@ 119,7 118,7 @@ func ExecModule(storePath, modPath string, resolveModule ModuleResolver, mirrors
return globals, nil
}
-func Eval(storePath string, env hscript.StringDict, resolveModule ModuleResolver, mirrors *sqlite3.Conn, expr string) (hscript.Value, error) {
+func Eval(storePath string, env hscript.StringDict, resolveModule ModuleResolver, mirrors *MirrorMap, expr string) (hscript.Value, error) {
mc := NewModuleCache()
currentDir, err := os.Getwd()
if err != nil {
M src/pkgs/interp/mirrormap.go => src/pkgs/interp/mirrormap.go +0 -7
@@ 1,7 1,6 @@
package interp
import (
- "net/url"
"sync"
)
@@ 28,12 27,6 @@ func (m *MirrorMap) AddContentMirror(content string, contentUrl string) error {
}
}
- // Validate url syntax
- _, err := url.Parse(contentUrl)
- if err != nil {
- return err
- }
-
uset = append(uset, contentUrl)
m.contentToUrls[content] = uset
return nil
D tests/devtests/0012-local-content-mirror.test => tests/devtests/0012-local-content-mirror.test +0 -13
@@ 1,13 0,0 @@
-#! /usr/bin/env bash
-
-set -eu
-cd "$( dirname "${BASH_SOURCE[0]}" )"
-
-. ./devtests.inc
-
-echo "content_server = \"file://$(pwd)\"" >> $HERMES_CONFIG
-mkdir ./content
-echo sane > ./content/sane.txt
-hermes index-content --verbose ./content
-hermes build -e "fetch(file_name='sane.txt', hash='sha256:ceae332dadb9718978b12e91288c110b9478a56872a3c93f284cdb848935af43')"
-diff -u content/sane.txt ./result/sane.txt>
\ No newline at end of file
D tests/devtests/0013-http-content-mirror.test => tests/devtests/0013-http-content-mirror.test +0 -20
@@ 1,20 0,0 @@
-#! /usr/bin/env bash
-
-set -eu
-cd "$( dirname "${BASH_SOURCE[0]}" )"
-
-. ./devtests.inc
-
-# XXX can we do better than just stealing a port?
-echo "content_server = \"http://ac:password@localhost:9846\"" >> $HERMES_CONFIG
-mkdir ./content
-echo sane > ./content/sane.txt
-hermes index-content --verbose ./content
-timeout 5 python3 -m http.server --bind localhost 9846 &
-serverpid="$!"
-sleep 0.3 # wait for server to come up.
-hermes build -e "fetch(file_name='sane.txt', hash='sha256:ceae332dadb9718978b12e91288c110b9478a56872a3c93f284cdb848935af43')" \
- || true
-kill "$serverpid"
-wait
-diff -u content/sane.txt ./result/sane.txt>
\ No newline at end of file