Merge pull request #10 from TLINDEN/proto

Use protobuf for internal data storage instead of json
T.v.Dein
2024-12-30 12:33:03 +01:00
committed by GitHub
22 changed files with 638 additions and 222 deletions
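In short: database entries were previously serialized with encoding/json before landing in bbolt; after this PR they are serialized with google.golang.org/protobuf. A minimal round-trip sketch, assuming the generated app package from this PR is importable; the field values are illustrative:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"

	"github.com/tlinden/anydb/app" // provides the generated DbEntry
)

func main() {
	entry := &app.DbEntry{
		Key:     "foo",
		Preview: "bar",
		Tags:    []string{"note"},
		Created: timestamppb.Now(),
		Size:    3,
		Value:   []byte("bar"),
	}

	// this is what now goes into bbolt, where json.Marshal was used before
	raw, err := proto.Marshal(entry)
	if err != nil {
		panic(err)
	}

	var back app.DbEntry
	if err := proto.Unmarshal(raw, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.GetKey(), "->", len(raw), "bytes on disk")
}
```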


@@ -29,7 +29,7 @@ BUILD = $(shell date +%Y.%m.%d.%H%M%S)
VERSION := $(if $(filter $(BRANCH), development),$(version)-$(BRANCH)-$(COMMIT)-$(BUILD),$(version))
HAVE_POD := $(shell pod2text -h 2>/dev/null)
all: $(tool).1 cmd/$(tool).go buildlocal
all: $(tool).1 cmd/$(tool).go app/dbentry.pb.go buildlocal
%.1: %.pod
ifdef HAVE_POD
@@ -49,6 +49,11 @@ endif
# awk '/SYNOPS/{f=1;next} /DESCR/{f=0} f' $*.pod | sed 's/^ //' >> cmd/$*.go
# echo "\`" >> cmd/$*.go
app/dbentry.pb.go: app/dbentry.proto
protoc -I=. --go_out=app app/dbentry.proto
mv app/github.com/tlinden/anydb/app/dbentry.pb.go app/dbentry.pb.go
rm -rf app/github.com
buildlocal:
go build -ldflags "-X 'github.com/tlinden/anydb/cfg.VERSION=$(VERSION)'"


@@ -76,10 +76,14 @@ anydb set foo bar -t note,important
anydb list -t important
# besides tag filtering you can also use regexps for searching
# note: by default the list command only searches through keys
anydb list '[a-z]+\d'
# do a full text search
anydb list '[a-z]+\d' -s
# anydb also supports a wide output
anydb list -o wide
anydb list -m wide
KEY TAGS SIZE AGE VALUE
blah important 4 B 7 seconds ago haha
foo 3 B 15 seconds ago bar
@@ -90,13 +94,13 @@ anydb ls -l
anydb /
# other outputs are possible as well
anydb list -o json
anydb list -m json
# you can backup your database
anydb export -o backup.json
# and import it somewhere else
anydb import -r backup.json
anydb import -i backup.json
# you can encrypt entries. anydb asks for a passphrase
# and will do the same when you retrieve the key using the

TODO.md

@@ -1,5 +1,39 @@
## Features
- repl
- mime-type => exec app + value
- add waitgroup to db.go funcs
- RestList does not support any params?
- lc() incoming tags
- lc() incoming tags+keys
## DB Structure
- put tags into sub bucket see #1
- change structure to:
data bucket
key => {key,value[0:60],isbin:bool}
value bucket
key => value (maybe always use []byte here)
tags bucket
key/tag => tag/key
tag/key => tag
So, list just uses the data bucket, no large contents.
A tag search only looks up matching tags, see #1.
Only a full text search and get would need to dig into the value bucket.
A delete would just delete all keys from all values and then:
look up all key/* entries in the tags bucket, then iterate over the values and
remove all tag/key entries. Then deleting a key would not leave any residue
behind.
However, maybe change the list command to just list everything and add
an extra find command for fulltext or tag search. Maybe still provide
filter options in list command but only filter for keys.
DONE: most of the above, except the tag stuff. manpage needs update and tests.
maybe stitch the find command and just add -f (full text search) to list.
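For reference, a minimal bbolt sketch of the nested-bucket layout this PR actually lands on (a root bucket holding "meta" and "data" sub buckets, see app/db.go below); the planned tags bucket is not part of this commit, and the bucket name "default" is illustrative:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("sketch.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		root, err := tx.CreateBucketIfNotExists([]byte("default"))
		if err != nil {
			return err
		}
		// "meta": key => protobuf-encoded entry header (no large content)
		meta, err := root.CreateBucketIfNotExists([]byte("meta"))
		if err != nil {
			return err
		}
		// "data": key => raw value bytes
		data, err := root.CreateBucketIfNotExists([]byte("data"))
		if err != nil {
			return err
		}
		if err := meta.Put([]byte("foo"), []byte("<pb header>")); err != nil {
			return err
		}
		return data.Put([]byte("foo"), []byte("bar"))
	})
	if err != nil {
		log.Fatal(err)
	}
}
```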

anydb.1

@@ -1,4 +1,4 @@
.\" Automatically generated by Pod::Man 4.14 (Pod::Simple 3.40)
.\" Automatically generated by Pod::Man 4.14 (Pod::Simple 3.42)
.\"
.\" Standard preamble:
.\" ========================================================================
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ANYDB 1"
.TH ANYDB 1 "2024-12-25" "1" "User Commands"
.TH ANYDB 1 "2024-12-30" "1" "User Commands"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -356,18 +356,18 @@ Usage:
.PP
.Vb 2
\& Usage:
\& anydb list [<filter\-regex>] [\-t <tag>] [\-m <mode>] [\-n \-N] [\-T <tpl>] [\-i] [flags]
\& anydb list [<filter\-regex> | \-t <tag> ] [\-m <mode>] [\-nNif] [\-T <tpl>] [flags]
\&
\& Aliases:
\& list, /, ls
\& list, ls, /, find, search
\&
\& Flags:
\& \-i, \-\-case\-insensitive filter case insensitive
\& \-h, \-\-help help for list
\& \-m, \-\-mode string output format (table|wide|json|template),
\& wide is a verbose table. (default \*(Aqtable\*(Aq)
\& \-m, \-\-mode string output format (table|wide|json|template), wide is a verbose table. (default \*(Aqtable\*(Aq)
\& \-n, \-\-no\-headers omit headers in tables
\& \-N, \-\-no\-human do not translate to human readable values
\& \-s, \-\-search\-fulltext perform a full text search
\& \-t, \-\-tags stringArray tags, multiple allowed
\& \-T, \-\-template string go template for \*(Aq\-m template\*(Aq
\& \-l, \-\-wide\-output output mode: wide
@@ -409,6 +409,10 @@ features.
.PP
If you want to search case insensitive, add the option \f(CW\*(C`\-i\*(C'\fR.
.PP
By default anydb only searches through the keys. If you want to search
through the values as well, then use the \f(CW\*(C`\-s\*(C'\fR option, which enables
full-text search.
.PP
You can \- as with the \fBget\fR command \- use other output modes. The
default mode is \*(L"table\*(R". The \*(L"wide\*(R" mode is, as already mentioned, a
more detailed table. Also supported is \*(L"json\*(R" mode and \*(L"template\*(R"
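The matching rule behind the new -s flag, mirrored from the List() change in app/db.go further down (function and parameter names here are illustrative): keys and tags are always matched against the filter, the value only when full-text search is on and the entry is not binary.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// include mirrors the list filter: keys and tags always count,
// the value only with -s (fulltext) and only for non-binary entries.
func include(filter *regexp.Regexp, key string, tags []string,
	value []byte, binary, fulltext bool) bool {
	if filter.MatchString(key) ||
		filter.MatchString(strings.Join(tags, " ")) {
		return true
	}
	if fulltext && !binary {
		return filter.MatchString(string(value))
	}
	return false
}

func main() {
	filter := regexp.MustCompile(`[a-z]+\d`)
	fmt.Println(include(filter, "foo", nil, []byte("abc1"), false, false)) // false: key only
	fmt.Println(include(filter, "foo", nil, []byte("abc1"), false, true))  // true: value matched
}
```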
@@ -478,7 +482,7 @@ Usage:
.PP
.Vb 2
\& Usage:
\& anydb export [\-o <json filename>] [flags]
\& anydb export \-o <json filename> [flags]
\&
\& Aliases:
\& export, dump, backup
@@ -488,13 +492,13 @@ Usage:
\& \-o, \-\-output string output to file
.Ve
.PP
The database dump is a \s-1JSON\s0 representation of the whole database and
will be printed to \s-1STDOUT\s0 by default. Redirect it to a file or use the
\&\f(CW\*(C`\-o\*(C'\fR option:
The database dump is a \s-1JSON\s0 representation of the whole database and
will be printed to the file specified with the \f(CW\*(C`\-o\*(C'\fR option. If you
specify \*(L"\-\*(R" as the filename, it will be written to \s-1STDOUT.\s0
.PP
.Vb 2
\& anydb export > dump.json
\& anydb export \-o dump.json
\& anydb export \-o \- > dump.json
.Ve
.PP
Please note that encrypted values will not be decrypted. This might
@@ -508,7 +512,7 @@ Usage:
.PP
.Vb 2
\& Usage:
\& anydb import [<json file>] [flags]
\& anydb import \-i <json file> [flags]
\&
\& Aliases:
\& import, restore
@@ -519,13 +523,14 @@ Usage:
\& \-t, \-\-tags stringArray tags, multiple allowed
.Ve
.PP
By default the \f(CW\*(C`import\*(C'\fR subcommand reads the \s-1JSON\s0 contents from
\&\s-1STDIN.\s0 You might pipe the dump into it or use the option \f(CW\*(C`\-r\*(C'\fR:
The \f(CW\*(C`import\*(C'\fR subcommand reads the \s-1JSON\s0 contents from
the file specified with the \f(CW\*(C`\-i\*(C'\fR option. If you specify \*(L"\-\*(R" as the
filename, it will be read from \s-1STDIN.\s0
.PP
.Vb 3
\& anydb import < dump.json
\& anydb import \-r dump.json
\& cat dump.json | anydb import
\& anydb import \-i \- < dump.json
\& anydb import \-i dump.json
\& cat dump.json | anydb import \-i \-
.Ve
.PP
If there is already a database, it will be saved by appending a


@@ -206,18 +206,18 @@ The B<list> subcommand displays a list of all database entries.
Usage:
Usage:
anydb list [<filter-regex>] [-t <tag>] [-m <mode>] [-n -N] [-T <tpl>] [-i] [flags]
anydb list [<filter-regex> | -t <tag> ] [-m <mode>] [-nNif] [-T <tpl>] [flags]
Aliases:
list, /, ls
list, ls, /, find, search
Flags:
-i, --case-insensitive filter case insensitive
-h, --help help for list
-m, --mode string output format (table|wide|json|template),
wide is a verbose table. (default 'table')
-m, --mode string output format (table|wide|json|template), wide is a verbose table. (default 'table')
-n, --no-headers omit headers in tables
-N, --no-human do not translate to human readable values
-s, --search-fulltext perform a full text search
-t, --tags stringArray tags, multiple allowed
-T, --template string go template for '-m template'
-l, --wide-output output mode: wide
@@ -254,6 +254,10 @@ features.
If you want to search case insensitive, add the option C<-i>.
By default anydb only searches through the keys. If you want to search
through the values as well, then use the C<-s> option, which enables
full-text search.
You can - as with the B<get> command - use other output modes. The
default mode is "table". The "wide" mode is, as already mentioned, a
more detailed table. Also supported is "json" mode and "template"
@@ -323,7 +327,7 @@ the B<export> subcommand.
Usage:
Usage:
anydb export [-o <json filename>] [flags]
anydb export -o <json filename> [flags]
Aliases:
export, dump, backup
@@ -332,12 +336,12 @@ Usage:
-h, --help help for export
-o, --output string output to file
The database dump is a JSON representation of the whole database and
will be printed to STDOUT by default. Redirect it to a file or use the
C<-o> option:
The database dump is a JSON representation of the whole database and
will be printed to the file specified with the C<-o> option. If you
specify "-" as the filename, it will be written to STDIN.
anydb export > dump.json
anydb export -o dump.json
anydb export -o - > dump.json
Please note that encrypted values will not be decrypted. This might
change in a future version of anydb.
@@ -350,7 +354,7 @@ dump.
Usage:
Usage:
anydb import [<json file>] [flags]
anydb import -i <json file> [flags]
Aliases:
import, restore
@@ -360,12 +364,13 @@ Usage:
-h, --help help for import
-t, --tags stringArray tags, multiple allowed
By default the C<import> subcommand reads the JSON contents from
STDIN. You might pipe the dump into it or use the option C<-r>:
The C<import> subcommand reads the JSON contents from
the file specified with the C<-i> option. If you specify "-" as the
filename, it will be read from STDIN.
anydb import < dump.json
anydb import -r dump.json
cat dump.json | anydb import
anydb import -i - < dump.json
anydb import -i dump.json
cat dump.json | anydb import -i -
If there is already a database, it will be saved by appending a
timestamp and a new database with the contents of the dump will be


@@ -20,30 +20,34 @@ import (
"fmt"
"io"
"os"
"strings"
"unicode/utf8"
)
type DbAttr struct {
Key string
Val string
Bin []byte
Preview string
Val []byte
Args []string
Tags []string
File string
Encrypted bool
Binary bool
}
// check if the value is to be read from a file or stdin, set up the
// preview text according to flags, and lowercase the key
func (attr *DbAttr) ParseKV() error {
attr.Key = strings.ToLower(attr.Args[0])
switch len(attr.Args) {
case 1:
// 1 arg = key + read from file or stdin
attr.Key = attr.Args[0]
if attr.File == "" {
attr.File = "-"
}
case 2:
attr.Key = attr.Args[0]
attr.Val = attr.Args[1]
attr.Val = []byte(attr.Args[1])
if attr.Args[1] == "-" {
attr.File = "-"
@@ -51,7 +55,29 @@ func (attr *DbAttr) ParseKV() error {
}
if attr.File != "" {
return attr.GetFileValue()
if err := attr.GetFileValue(); err != nil {
return err
}
}
switch {
case attr.Binary:
attr.Preview = "<binary-content>"
case attr.Encrypted:
attr.Preview = "<encrypted-content>"
default:
if len(attr.Val) > MaxValueWidth {
attr.Preview = string(attr.Val)[0:MaxValueWidth] + "..."
if strings.Contains(attr.Preview, "\n") {
parts := strings.Split(attr.Preview, "\n")
if len(parts) > 0 {
attr.Preview = parts[0]
}
}
} else {
attr.Preview = string(attr.Val)
}
}
return nil
@@ -82,11 +108,12 @@ func (attr *DbAttr) GetFileValue() error {
}
// poor man's text file test
sdata := string(data)
if utf8.ValidString(sdata) {
attr.Val = sdata
attr.Val = data
if utf8.ValidString(string(data)) {
attr.Binary = false
} else {
attr.Bin = data
attr.Binary = true
}
} else {
// read from console stdin
@@ -101,7 +128,7 @@ func (attr *DbAttr) GetFileValue() error {
data += input + "\n"
}
attr.Val = data
attr.Val = []byte(data)
}
return nil


@@ -18,7 +18,6 @@ package app
import (
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"os"
@@ -104,7 +103,7 @@ func GetRandom(size int, capacity int) ([]byte, error) {
// modifying it.
//
// The cipher text consists of:
// base64(password-salt) + base64(12 byte nonce + ciphertext + 16 byte mac)
// password-salt + (12 byte nonce + ciphertext + 16 byte mac)
func Encrypt(pass []byte, attr *DbAttr) error {
key, err := DeriveKey(pass, nil)
if err != nil {
@@ -116,25 +115,17 @@ func Encrypt(pass []byte, attr *DbAttr) error {
return fmt.Errorf("failed to create AEAD cipher: %w", err)
}
var plain []byte
if attr.Val != "" {
plain = []byte(attr.Val)
} else {
plain = attr.Bin
}
total := aead.NonceSize() + len(plain) + aead.Overhead()
total := aead.NonceSize() + len(attr.Val) + aead.Overhead()
nonce, err := GetRandom(aead.NonceSize(), total)
if err != nil {
return err
}
cipher := aead.Seal(nonce, nonce, plain, nil)
cipher := aead.Seal(nonce, nonce, attr.Val, nil)
attr.Bin = nil
attr.Val = base64.RawStdEncoding.EncodeToString(key.Salt) +
base64.RawStdEncoding.EncodeToString(cipher)
attr.Val = append(attr.Val, key.Salt...)
attr.Val = append(attr.Val, cipher...)
attr.Encrypted = true
@@ -142,21 +133,17 @@ func Encrypt(pass []byte, attr *DbAttr) error {
}
// Do the reverse
func Decrypt(pass []byte, cipherb64 string) ([]byte, error) {
salt, err := base64.RawStdEncoding.Strict().DecodeString(cipherb64[0:B64SaltLen])
if err != nil {
return nil, fmt.Errorf("failed to encode to base64: %w", err)
func Decrypt(pass []byte, cipherb []byte) ([]byte, error) {
if len(cipherb) < B64SaltLen {
return nil, fmt.Errorf("encrypted cipher block too small")
}
key, err := DeriveKey(pass, salt)
key, err := DeriveKey(pass, cipherb[0:B64SaltLen])
if err != nil {
return nil, err
}
cipher, err := base64.RawStdEncoding.Strict().DecodeString(cipherb64[B64SaltLen:])
if err != nil {
return nil, fmt.Errorf("failed to encode to base64: %w", err)
}
cipher := cipherb[B64SaltLen:]
aead, err := chacha20poly1305.New(key.Key)
if err != nil {
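For reference, the new on-disk cipher layout that Encrypt builds and Decrypt slices apart: raw bytes instead of the former base64 text, salt first, then the AEAD output. A sketch under the assumption that B64SaltLen (stood in for by saltLen here) is the salt-prefix length produced by DeriveKey:

```go
// Sketch of the new cipher blob layout:
//
//   [ salt | nonce (12 bytes) | ciphertext | MAC (16 bytes) ]
package main

import (
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

const saltLen = 16 // illustrative; the real code uses B64SaltLen

func split(blob []byte) (salt, nonce, sealed []byte, err error) {
	if len(blob) < saltLen+chacha20poly1305.NonceSize+chacha20poly1305.Overhead {
		return nil, nil, nil, fmt.Errorf("encrypted cipher block too small")
	}
	salt = blob[:saltLen]
	nonce = blob[saltLen : saltLen+chacha20poly1305.NonceSize]
	sealed = blob[saltLen+chacha20poly1305.NonceSize:] // ciphertext + MAC
	return salt, nonce, sealed, nil
}

func main() {
	salt, nonce, sealed, err := split(make([]byte, 64))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(salt), len(nonce), len(sealed)) // 16 12 36
}
```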

app/db.go

@@ -27,6 +27,8 @@ import (
"time"
bolt "go.etcd.io/bbolt"
"google.golang.org/protobuf/proto"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)
const MaxValueWidth int = 60
@@ -38,17 +40,6 @@ type DB struct {
DB *bolt.DB
}
type DbEntry struct {
Id string `json:"id"`
Key string `json:"key"`
Value string `json:"value"`
Encrypted bool `json:"encrypted"`
Bin []byte `json:"bin"`
Tags []string `json:"tags"`
Created time.Time `json:"created"`
Size int
}
type BucketInfo struct {
Name string
Keys int
@@ -62,33 +53,7 @@ type DbInfo struct {
Path string
}
// Post process an entry for list output.
// Do NOT call it during write processing!
func (entry *DbEntry) Normalize() {
entry.Size = len(entry.Value)
if entry.Encrypted {
entry.Value = "<encrypted-content>"
}
if len(entry.Bin) > 0 {
entry.Value = "<binary-content>"
entry.Size = len(entry.Bin)
}
if strings.Contains(entry.Value, "\n") {
parts := strings.Split(entry.Value, "\n")
if len(parts) > 0 {
entry.Value = parts[0]
}
}
if len(entry.Value) > MaxValueWidth {
entry.Value = entry.Value[0:MaxValueWidth] + "..."
}
}
type DbEntries []DbEntry
type DbEntries []*DbEntry
type DbTag struct {
Keys []string `json:"key"`
@@ -120,7 +85,7 @@ func (db *DB) Close() error {
return db.DB.Close()
}
func (db *DB) List(attr *DbAttr) (DbEntries, error) {
func (db *DB) List(attr *DbAttr, fulltext bool) (DbEntries, error) {
if err := db.Open(); err != nil {
return nil, err
}
@@ -134,27 +99,43 @@ func (db *DB) List(attr *DbAttr) (DbEntries, error) {
}
err := db.DB.View(func(tx *bolt.Tx) error {
root := tx.Bucket([]byte(db.Bucket))
if root == nil {
return nil
}
bucket := tx.Bucket([]byte(db.Bucket))
bucket := root.Bucket([]byte("meta"))
if bucket == nil {
return nil
}
err := bucket.ForEach(func(key, jsonentry []byte) error {
databucket := root.Bucket([]byte("data"))
if databucket == nil {
return fmt.Errorf("failed to retrieve data sub bucket")
}
err := bucket.ForEach(func(key, pbentry []byte) error {
var entry DbEntry
if err := json.Unmarshal(jsonentry, &entry); err != nil {
return fmt.Errorf("failed to unmarshal from json: %w", err)
if err := proto.Unmarshal(pbentry, &entry); err != nil {
return fmt.Errorf("failed to unmarshal from protobuf: %w", err)
}
entry.Value = databucket.Get([]byte(entry.Key)) // empty is ok
var include bool
switch {
case filter != nil:
if filter.MatchString(entry.Value) ||
filter.MatchString(entry.Key) ||
if filter.MatchString(entry.Key) ||
filter.MatchString(strings.Join(entry.Tags, " ")) {
include = true
}
if !entry.Binary && !include && fulltext {
if filter.MatchString(string(entry.Value)) {
include = true
}
}
case len(attr.Tags) > 0:
for _, search := range attr.Tags {
for _, tag := range entry.Tags {
@@ -173,7 +154,7 @@ func (db *DB) List(attr *DbAttr) (DbEntries, error) {
}
if include {
entries = append(entries, entry)
entries = append(entries, &entry)
}
return nil
@@ -181,6 +162,7 @@ func (db *DB) List(attr *DbAttr) (DbEntries, error) {
return err
})
return entries, err
}
@@ -192,30 +174,36 @@ func (db *DB) Set(attr *DbAttr) error {
entry := DbEntry{
Key: attr.Key,
Value: attr.Val,
Bin: attr.Bin,
Binary: attr.Binary,
Tags: attr.Tags,
Encrypted: attr.Encrypted,
Created: time.Now(),
Created: timestamppb.Now(),
Size: uint64(len(attr.Val)),
Preview: attr.Preview,
}
// check if the entry already exists and if yes, check if it has
// any tags. if so, we initialize our update struct with these
// tags unless it has new tags configured.
err := db.DB.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(db.Bucket))
root := tx.Bucket([]byte(db.Bucket))
if root == nil {
return nil
}
bucket := root.Bucket([]byte("meta"))
if bucket == nil {
return nil
}
jsonentry := bucket.Get([]byte(entry.Key))
if jsonentry == nil {
pbentry := bucket.Get([]byte(entry.Key))
if pbentry == nil {
return nil
}
var oldentry DbEntry
if err := json.Unmarshal(jsonentry, &oldentry); err != nil {
return fmt.Errorf("failed to unmarshal from json: %w", err)
if err := proto.Unmarshal(pbentry, &oldentry); err != nil {
return fmt.Errorf("failed to unmarshal from protobuf: %w", err)
}
if len(oldentry.Tags) > 0 && len(entry.Tags) == 0 {
@@ -230,19 +218,39 @@ func (db *DB) Set(attr *DbAttr) error {
return err
}
// marshall our data
pbentry, err := proto.Marshal(&entry)
if err != nil {
return fmt.Errorf("failed to marshall protobuf: %w", err)
}
err = db.DB.Update(func(tx *bolt.Tx) error {
// insert data
bucket, err := tx.CreateBucketIfNotExists([]byte(db.Bucket))
// create root bucket
root, err := tx.CreateBucketIfNotExists([]byte(db.Bucket))
if err != nil {
return fmt.Errorf("failed to create DB bucket: %w", err)
}
jsonentry, err := json.Marshal(entry)
// create meta bucket
bucket, err := root.CreateBucketIfNotExists([]byte("meta"))
if err != nil {
return fmt.Errorf("failed to marshall json: %w", err)
return fmt.Errorf("failed to create DB meta sub bucket: %w", err)
}
err = bucket.Put([]byte(entry.Key), []byte(jsonentry))
// write meta data
err = bucket.Put([]byte(entry.Key), []byte(pbentry))
if err != nil {
return fmt.Errorf("failed to insert data: %w", err)
}
// create data bucket
databucket, err := root.CreateBucketIfNotExists([]byte("data"))
if err != nil {
return fmt.Errorf("failed to create DB data sub bucket: %w", err)
}
// write value
err = databucket.Put([]byte(entry.Key), attr.Val)
if err != nil {
return fmt.Errorf("failed to insert data: %w", err)
}
@@ -266,21 +274,48 @@ func (db *DB) Get(attr *DbAttr) (*DbEntry, error) {
entry := DbEntry{}
err := db.DB.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(db.Bucket))
// root bucket
root := tx.Bucket([]byte(db.Bucket))
if root == nil {
return nil
}
// get meta sub bucket
bucket := root.Bucket([]byte("meta"))
if bucket == nil {
return nil
}
jsonentry := bucket.Get([]byte(attr.Key))
if jsonentry == nil {
// FIXME: shall we return a key not found error?
return nil
// retrieve meta data
pbentry := bucket.Get([]byte(attr.Key))
if pbentry == nil {
return fmt.Errorf("no such key: %s", attr.Key)
}
if err := json.Unmarshal(jsonentry, &entry); err != nil {
return fmt.Errorf("failed to unmarshal from json: %w", err)
// put into struct
if err := proto.Unmarshal(pbentry, &entry); err != nil {
return fmt.Errorf("failed to unmarshal from protobuf: %w", err)
}
// get data sub bucket
databucket := root.Bucket([]byte("data"))
if databucket == nil {
return fmt.Errorf("failed to retrieve data sub bucket")
}
// retrieve actual data value
value := databucket.Get([]byte(attr.Key))
if len(value) == 0 {
return fmt.Errorf("no such key: %s", attr.Key)
}
// we need to make a copy of it, otherwise we'll get an
// "unexpected fault address" error
vc := make([]byte, len(value))
copy(vc, value)
entry.Value = vc
return nil
})
@@ -317,7 +352,7 @@ func (db *DB) Import(attr *DbAttr) (string, error) {
return "", err
}
if attr.Val == "" {
if len(attr.Val) == 0 {
return "", errors.New("empty json file")
}
@@ -345,22 +380,41 @@ func (db *DB) Import(attr *DbAttr) (string, error) {
defer db.Close()
err := db.DB.Update(func(tx *bolt.Tx) error {
// insert data
bucket, err := tx.CreateBucketIfNotExists([]byte(db.Bucket))
// create root bucket
root, err := tx.CreateBucketIfNotExists([]byte(db.Bucket))
if err != nil {
return fmt.Errorf("failed to create bucket: %w", err)
return fmt.Errorf("failed to create DB bucket: %w", err)
}
// create meta bucket
bucket, err := root.CreateBucketIfNotExists([]byte("meta"))
if err != nil {
return fmt.Errorf("failed to create DB meta sub bucket: %w", err)
}
for _, entry := range entries {
jsonentry, err := json.Marshal(entry)
pbentry, err := proto.Marshal(entry)
if err != nil {
return fmt.Errorf("failed to marshall json: %w", err)
return fmt.Errorf("failed to marshall protobuf: %w", err)
}
err = bucket.Put([]byte(entry.Key), []byte(jsonentry))
// write meta data
err = bucket.Put([]byte(entry.Key), []byte(pbentry))
if err != nil {
return fmt.Errorf("failed to insert data into DB: %w", err)
}
// create data bucket
databucket, err := root.CreateBucketIfNotExists([]byte("data"))
if err != nil {
return fmt.Errorf("failed to create DB data sub bucket: %w", err)
}
// write value
err = databucket.Put([]byte(entry.Key), entry.Value)
if err != nil {
return fmt.Errorf("failed to insert data: %w", err)
}
}
return nil
@@ -417,3 +471,56 @@ func (db *DB) Info() (*DbInfo, error) {
return info, err
}
func (db *DB) Getall(attr *DbAttr) (DbEntries, error) {
if err := db.Open(); err != nil {
return nil, err
}
defer db.Close()
var entries DbEntries
err := db.DB.View(func(tx *bolt.Tx) error {
// root bucket
root := tx.Bucket([]byte(db.Bucket))
if root == nil {
return nil
}
// get meta sub bucket
bucket := root.Bucket([]byte("meta"))
if bucket == nil {
return nil
}
// get data sub bucket
databucket := root.Bucket([]byte("data"))
if databucket == nil {
return fmt.Errorf("failed to retrieve data sub bucket")
}
// iterate over all db entries in meta sub bucket
err := bucket.ForEach(func(key, pbentry []byte) error {
var entry DbEntry
if err := proto.Unmarshal(pbentry, &entry); err != nil {
return fmt.Errorf("failed to unmarshal from protobuf: %w", err)
}
// retrieve the value from the data sub bucket
value := databucket.Get([]byte(entry.Key))
// we need to make a copy of it, otherwise we'll get an
// "unexpected fault address" error
vc := make([]byte, len(value))
copy(vc, value)
entry.Value = vc
entries = append(entries, &entry)
return nil
})
return err
})
return entries, err
}
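The copies in Get() and Getall() are the standard bbolt rule rather than a quirk of this PR: slices returned by Bucket.Get() point into the memory-mapped database file and are only valid while the transaction is open. A minimal sketch of the safe pattern ("default" and the key are illustrative):

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("sketch.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var value []byte
	err = db.View(func(tx *bolt.Tx) error {
		root := tx.Bucket([]byte("default"))
		if root == nil {
			return nil
		}
		data := root.Bucket([]byte("data"))
		if data == nil {
			return nil
		}
		// Get returns memory that is only valid inside this transaction,
		// so copy before it escapes the closure
		value = append([]byte(nil), data.Get([]byte("foo"))...)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", value) // safe: we kept a copy
}
```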

app/dbentry.pb.go (new file)

@@ -0,0 +1,210 @@
// -*-c++-*-
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.1
// protoc v3.21.12
// source: app/dbentry.proto
package app
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type DbEntry struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=Id,proto3" json:"Id,omitempty"`
Key string `protobuf:"bytes,2,opt,name=Key,proto3" json:"Key,omitempty"`
Preview string `protobuf:"bytes,3,opt,name=Preview,proto3" json:"Preview,omitempty"`
Tags []string `protobuf:"bytes,4,rep,name=Tags,proto3" json:"Tags,omitempty"`
Created *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=Created,proto3" json:"Created,omitempty"`
Size uint64 `protobuf:"varint,6,opt,name=Size,proto3" json:"Size,omitempty"`
Encrypted bool `protobuf:"varint,7,opt,name=Encrypted,proto3" json:"Encrypted,omitempty"`
Binary bool `protobuf:"varint,8,opt,name=Binary,proto3" json:"Binary,omitempty"`
Value []byte `protobuf:"bytes,9,opt,name=Value,proto3" json:"Value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DbEntry) Reset() {
*x = DbEntry{}
mi := &file_app_dbentry_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DbEntry) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DbEntry) ProtoMessage() {}
func (x *DbEntry) ProtoReflect() protoreflect.Message {
mi := &file_app_dbentry_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DbEntry.ProtoReflect.Descriptor instead.
func (*DbEntry) Descriptor() ([]byte, []int) {
return file_app_dbentry_proto_rawDescGZIP(), []int{0}
}
func (x *DbEntry) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *DbEntry) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
func (x *DbEntry) GetPreview() string {
if x != nil {
return x.Preview
}
return ""
}
func (x *DbEntry) GetTags() []string {
if x != nil {
return x.Tags
}
return nil
}
func (x *DbEntry) GetCreated() *timestamppb.Timestamp {
if x != nil {
return x.Created
}
return nil
}
func (x *DbEntry) GetSize() uint64 {
if x != nil {
return x.Size
}
return 0
}
func (x *DbEntry) GetEncrypted() bool {
if x != nil {
return x.Encrypted
}
return false
}
func (x *DbEntry) GetBinary() bool {
if x != nil {
return x.Binary
}
return false
}
func (x *DbEntry) GetValue() []byte {
if x != nil {
return x.Value
}
return nil
}
var File_app_dbentry_proto protoreflect.FileDescriptor
var file_app_dbentry_proto_rawDesc = []byte{
0x0a, 0x11, 0x61, 0x70, 0x70, 0x2f, 0x64, 0x62, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x03, 0x61, 0x70, 0x70, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xef, 0x01, 0x0a, 0x07, 0x44, 0x62,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x02, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x65, 0x76, 0x69,
0x65, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x50, 0x72, 0x65, 0x76, 0x69, 0x65,
0x77, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x52, 0x07, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x53,
0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12,
0x1c, 0x0a, 0x09, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01,
0x28, 0x08, 0x52, 0x09, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a,
0x06, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x42,
0x69, 0x6e, 0x61, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x1e, 0x5a, 0x1c, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x74, 0x6c, 0x69, 0x6e, 0x64, 0x65,
0x6e, 0x2f, 0x61, 0x6e, 0x79, 0x64, 0x62, 0x2f, 0x61, 0x70, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var (
file_app_dbentry_proto_rawDescOnce sync.Once
file_app_dbentry_proto_rawDescData = file_app_dbentry_proto_rawDesc
)
func file_app_dbentry_proto_rawDescGZIP() []byte {
file_app_dbentry_proto_rawDescOnce.Do(func() {
file_app_dbentry_proto_rawDescData = protoimpl.X.CompressGZIP(file_app_dbentry_proto_rawDescData)
})
return file_app_dbentry_proto_rawDescData
}
var file_app_dbentry_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_app_dbentry_proto_goTypes = []any{
(*DbEntry)(nil), // 0: app.DbEntry
(*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp
}
var file_app_dbentry_proto_depIdxs = []int32{
1, // 0: app.DbEntry.Created:type_name -> google.protobuf.Timestamp
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_app_dbentry_proto_init() }
func file_app_dbentry_proto_init() {
if File_app_dbentry_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_app_dbentry_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_app_dbentry_proto_goTypes,
DependencyIndexes: file_app_dbentry_proto_depIdxs,
MessageInfos: file_app_dbentry_proto_msgTypes,
}.Build()
File_app_dbentry_proto = out.File
file_app_dbentry_proto_rawDesc = nil
file_app_dbentry_proto_goTypes = nil
file_app_dbentry_proto_depIdxs = nil
}

app/dbentry.proto (new file)

@@ -0,0 +1,20 @@
// -*-c++-*-
syntax = "proto3";
package app;
import "google/protobuf/timestamp.proto";
option go_package = "github.com/tlinden/anydb/app";
message DbEntry {
string Id = 1;
string Key = 2;
string Preview = 3;
repeated string Tags = 4;
google.protobuf.Timestamp Created = 5;
uint64 Size = 6;
bool Encrypted = 7;
bool Binary = 8;
bytes Value = 9;
}


@@ -26,7 +26,7 @@ import (
"github.com/tlinden/anydb/common"
)
var Version string = "v0.0.7"
var Version string = "v0.1.0"
type BucketConfig struct {
Encrypt bool
@@ -42,6 +42,7 @@ type Config struct {
NoHumanize bool
Encrypt bool // one entry
CaseInsensitive bool
Fulltext bool
Listen string
Buckets map[string]BucketConfig // config file only


@@ -187,18 +187,18 @@ SUBCOMMANDS
Usage:
Usage:
anydb list [<filter-regex>] [-t <tag>] [-m <mode>] [-n -N] [-T <tpl>] [-i] [flags]
anydb list [<filter-regex> | -t <tag> ] [-m <mode>] [-nNif] [-T <tpl>] [flags]
Aliases:
list, /, ls
list, ls, /, find, search
Flags:
-i, --case-insensitive filter case insensitive
-h, --help help for list
-m, --mode string output format (table|wide|json|template),
wide is a verbose table. (default 'table')
-m, --mode string output format (table|wide|json|template), wide is a verbose table. (default 'table')
-n, --no-headers omit headers in tables
-N, --no-human do not translate to human readable values
-s, --search-fulltext perform a full text search
-t, --tags stringArray tags, multiple allowed
-T, --template string go template for '-m template'
-l, --wide-output output mode: wide
@@ -234,6 +234,10 @@ SUBCOMMANDS
If you want to search case insensitive, add the option "-i".
By default anydb only searches through the keys. If you want to search
through the values as well, then use the "-s" option, which enables
full-text search.
You can - as with the get command - use other output modes. The default
mode is "table". The "wide" mode is, as already mentioned, a more
detailed table. Also supported is "json" mode and "template" mode. For
@@ -293,7 +297,7 @@ SUBCOMMANDS
Usage:
Usage:
anydb export [-o <json filename>] [flags]
anydb export -o <json filename> [flags]
Aliases:
export, dump, backup
@@ -303,11 +307,11 @@ SUBCOMMANDS
-o, --output string output to file
The database dump is a JSON representation of the whole database and
will be printed to STDOUT by default. Redirect it to a file or use the
"-o" option:
will be printed to the file specified with the "-o" option. If you
specify "-" as the filename, it will be written to STDIN.
anydb export > dump.json
anydb export -o dump.json
anydb export -o - > dump.json
Please note that encrypted values will not be decrypted. This might
change in a future version of anydb.
@@ -319,7 +323,7 @@ SUBCOMMANDS
Usage:
Usage:
anydb import [<json file>] [flags]
anydb import -i <json file> [flags]
Aliases:
import, restore
@@ -329,12 +333,13 @@ SUBCOMMANDS
-h, --help help for import
-t, --tags stringArray tags, multiple allowed
By default the "import" subcommand reads the JSON contents from STDIN.
You might pipe the dump into it or use the option "-r":
The "import" subcommand reads the JSON contents from the file specified
with the "-i" option. If you specify "-" as the filename, it will be
read from STDIN.
anydb import < dump.json
anydb import -r dump.json
cat dump.json | anydb import
anydb import -i - < dump.json
anydb import -i dump.json
cat dump.json | anydb import -i -
If there is already a database, it will be saved by appending a
timestamp and a new database with the contents of the dump will be


@@ -20,7 +20,6 @@ import (
"errors"
"os"
"strings"
"unicode/utf8"
"github.com/spf13/cobra"
"github.com/tlinden/anydb/app"
@@ -124,12 +123,7 @@ func Get(conf *cfg.Config) *cobra.Command {
return err
}
if utf8.ValidString(string(clear)) {
entry.Value = string(clear)
} else {
entry.Bin = clear
}
entry.Value = clear
entry.Encrypted = false
}
@@ -188,7 +182,7 @@ func List(conf *cfg.Config) *cobra.Command {
)
var cmd = &cobra.Command{
Use: "list [<filter-regex>] [-t <tag>] [-m <mode>] [-n -N] [-T <tpl>] [-i]",
Use: "list [<filter-regex> | -t <tag> ] [-m <mode>] [-nNif] [-T <tpl>]",
Short: "List database contents",
Long: `List database contents`,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -212,7 +206,7 @@ func List(conf *cfg.Config) *cobra.Command {
conf.Mode = "wide"
}
entries, err := conf.DB.List(&attr)
entries, err := conf.DB.List(&attr, conf.Fulltext)
if err != nil {
return err
}
@@ -227,10 +221,13 @@ func List(conf *cfg.Config) *cobra.Command {
cmd.PersistentFlags().BoolVarP(&conf.NoHeaders, "no-headers", "n", false, "omit headers in tables")
cmd.PersistentFlags().BoolVarP(&conf.NoHumanize, "no-human", "N", false, "do not translate to human readable values")
cmd.PersistentFlags().BoolVarP(&conf.CaseInsensitive, "case-insensitive", "i", false, "filter case insensitive")
cmd.PersistentFlags().BoolVarP(&conf.Fulltext, "search-fulltext", "s", false, "perform a full text search")
cmd.PersistentFlags().StringArrayVarP(&attr.Tags, "tags", "t", nil, "tags, multiple allowed")
cmd.Aliases = append(cmd.Aliases, "/")
cmd.Aliases = append(cmd.Aliases, "ls")
cmd.Aliases = append(cmd.Aliases, "/")
cmd.Aliases = append(cmd.Aliases, "find")
cmd.Aliases = append(cmd.Aliases, "search")
return cmd
}


@@ -23,7 +23,6 @@ import (
"io"
"os"
"os/exec"
"unicode/utf8"
"github.com/spf13/cobra"
"github.com/tlinden/anydb/app"
@@ -38,16 +37,16 @@ func Export(conf *cfg.Config) *cobra.Command {
)
var cmd = &cobra.Command{
Use: "export [-o <json filename>]",
Short: "Export database to json",
Long: `Export database to json`,
Use: "export -o <json filename>",
Short: "Export database to json file",
Long: `Export database to json file`,
RunE: func(cmd *cobra.Command, args []string) error {
// errors at this stage do not cause the usage to be shown
cmd.SilenceUsage = true
conf.Mode = "json"
entries, err := conf.DB.List(&attr)
entries, err := conf.DB.Getall(&attr)
if err != nil {
return err
}
@@ -56,7 +55,10 @@ func Export(conf *cfg.Config) *cobra.Command {
},
}
cmd.PersistentFlags().StringVarP(&attr.File, "output", "o", "", "output to file")
cmd.PersistentFlags().StringVarP(&attr.File, "output-file", "o", "", "filename or - for STDIN")
if err := cmd.MarkPersistentFlagRequired("output-file"); err != nil {
panic(err)
}
cmd.Aliases = append(cmd.Aliases, "dump")
cmd.Aliases = append(cmd.Aliases, "backup")
@@ -70,7 +72,7 @@ func Import(conf *cfg.Config) *cobra.Command {
)
var cmd = &cobra.Command{
Use: "import [<json file>]",
Use: "import -i <json file>",
Short: "Import database dump",
Long: `Import database dump`,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -87,8 +89,11 @@ func Import(conf *cfg.Config) *cobra.Command {
},
}
cmd.PersistentFlags().StringVarP(&attr.File, "file", "r", "", "Filename or - for STDIN")
cmd.PersistentFlags().StringVarP(&attr.File, "import-file", "i", "", "filename or - for STDIN")
cmd.PersistentFlags().StringArrayVarP(&attr.Tags, "tags", "t", nil, "tags, multiple allowed")
if err := cmd.MarkPersistentFlagRequired("import-file"); err != nil {
panic(err)
}
cmd.Aliases = append(cmd.Aliases, "restore")
@@ -199,7 +204,7 @@ func Edit(conf *cfg.Config) *cobra.Command {
return err
}
if len(entry.Value) == 0 && len(entry.Bin) > 0 {
if len(entry.Value) == 0 && entry.Binary {
return errors.New("key contains binary uneditable content")
}
@@ -216,12 +221,7 @@ func Edit(conf *cfg.Config) *cobra.Command {
return err
}
if utf8.ValidString(string(clear)) {
entry.Value = string(clear)
} else {
entry.Bin = clear
}
entry.Value = clear
entry.Encrypted = false
}
@@ -231,7 +231,7 @@ func Edit(conf *cfg.Config) *cobra.Command {
// save file to a temp file, call the editor with it, read
// it back in and compare the content with the original
// one
newcontent, err := editContent(editor, entry.Value)
newcontent, err := editContent(editor, string(entry.Value))
if err != nil {
return err
}
@@ -241,7 +241,7 @@ func Edit(conf *cfg.Config) *cobra.Command {
Key: attr.Key,
Tags: attr.Tags,
Encrypted: attr.Encrypted,
Val: newcontent,
Val: []byte(newcontent),
}
// encrypt if needed

go.mod

@@ -29,4 +29,5 @@ require (
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/tools v0.22.0 // indirect
google.golang.org/protobuf v1.36.1 // indirect
)

go.sum

@@ -89,6 +89,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -65,8 +65,6 @@ func ListTemplate(writer io.Writer, conf *cfg.Config, entries app.DbEntries) err
buf := bytes.Buffer{}
for _, row := range entries {
row.Normalize()
buf.Reset()
err = tmpl.Execute(&buf, row)
if err != nil {
@@ -94,31 +92,28 @@ func ListTable(writer io.Writer, conf *cfg.Config, entries app.DbEntries) error
}
for _, row := range entries {
row.Normalize()
if conf.Mode == "wide" {
switch conf.NoHumanize {
case true:
table.Append([]string{
row.Key,
strings.Join(row.Tags, ","),
strconv.Itoa(row.Size),
row.Created.Format("02.01.2006T03:04.05"),
row.Value,
strconv.FormatUint(row.Size, 10),
row.Created.AsTime().Format("02.01.2006T03:04.05"),
row.Preview,
})
default:
table.Append([]string{
row.Key,
strings.Join(row.Tags, ","),
humanize.Bytes(uint64(row.Size)),
//row.Created.Format("02.01.2006T03:04.05"),
humanize.Time(row.Created),
row.Value,
humanize.Time(row.Created.AsTime()),
row.Preview,
})
}
} else {
table.Append([]string{row.Key, row.Value})
table.Append([]string{row.Key, row.Preview})
}
}
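Since Created is now a *timestamppb.Timestamp rather than a time.Time, formatting goes through AsTime() first; both output paths used above in one small sketch (the sample values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/dustin/go-humanize"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	created := timestamppb.New(time.Now().Add(-15 * time.Second))

	// -N / --no-human: fixed layout via AsTime()
	fmt.Println(created.AsTime().Format("02.01.2006T03:04.05"))

	// default: relative, human readable
	fmt.Println(humanize.Time(created.AsTime())) // "15 seconds ago"
	fmt.Println(humanize.Bytes(uint64(3)))       // "3 B"
}
```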


@@ -22,7 +22,6 @@ import (
"io"
"os"
"reflect"
"strings"
"github.com/dustin/go-humanize"
"github.com/tlinden/anydb/app"
@@ -40,16 +39,15 @@ func Print(writer io.Writer, conf *cfg.Config, attr *app.DbAttr, entry *app.DbEn
switch conf.Mode {
case "simple", "":
if len(entry.Bin) > 0 {
if entry.Binary {
if isatty {
fmt.Println("binary data omitted")
} else {
os.Stdout.Write(entry.Bin)
os.Stdout.Write(entry.Value)
}
} else {
fmt.Print(entry.Value)
if !strings.HasSuffix(entry.Value, "\n") {
fmt.Print(string(entry.Value))
if entry.Value[entry.Size-1] != '\n' {
// always add a terminal newline
fmt.Println()
}
@@ -62,9 +60,9 @@ func Print(writer io.Writer, conf *cfg.Config, attr *app.DbAttr, entry *app.DbEn
fmt.Println(string(jsonentry))
case "wide":
return ListTable(writer, conf, app.DbEntries{*entry})
return ListTable(writer, conf, app.DbEntries{entry})
case "template":
return ListTemplate(writer, conf, app.DbEntries{*entry})
return ListTemplate(writer, conf, app.DbEntries{entry})
}
return nil
@@ -77,7 +75,6 @@ func WriteFile(writer io.Writer, conf *cfg.Config, attr *app.DbAttr, entry *app.
if attr.File == "-" {
fileHandle = os.Stdout
} else {
fd, err := os.OpenFile(attr.File, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
return fmt.Errorf("failed to open file %s for writing: %w", attr.File, err)
@@ -87,17 +84,14 @@ func WriteFile(writer io.Writer, conf *cfg.Config, attr *app.DbAttr, entry *app.
fileHandle = fd
}
if len(entry.Bin) > 0 {
// binary file content
_, err = fileHandle.Write(entry.Bin)
} else {
val := entry.Value
if !strings.HasSuffix(val, "\n") {
// always add a terminal newline
val += "\n"
}
// actually write file content
_, err = fileHandle.Write(entry.Value)
_, err = fileHandle.Write([]byte(val))
if !entry.Binary {
if entry.Value[entry.Size-1] != '\n' {
// always add a terminal newline
_, err = fileHandle.Write([]byte{'\n'})
}
}
if err != nil {


@@ -54,7 +54,7 @@ func RestList(c *fiber.Ctx, conf *cfg.Config) error {
}
// get list
entries, err := conf.DB.List(attr)
entries, err := conf.DB.List(attr, false)
if err != nil {
return JsonStatus(c, fiber.StatusForbidden,
"Unable to list keys: "+err.Error())


@@ -32,3 +32,20 @@ stdout 50
# look if it's inside the db
exec anydb -f test.db ls
stdout datum.*binary-content
# do the same thing with text content, start with a new text entry
exec anydb -f test.db set feed alpha
# which we write to a file
exec anydb -f test.db get feed -o out2.txt
exists out2.txt
# check if it's filled (5 bytes + newline)
exec ls -l out2.txt
stdout 6
# compare content
exec cat out2.txt
stdout alpha


@@ -23,10 +23,10 @@ exec anydb -f test.db export -o backup.json
stdout 'database contents exported to backup.json'
# import into new db
exec anydb -f new.db import -r backup.json
exec anydb -f new.db import -i backup.json
stdout 'imported.*entries'
# check contents
exec anydb -f new.db list
exec anydb -f new.db list bar -s
stdout foo.*bar


@@ -37,12 +37,12 @@ exec anydb -f test.db list -t flower
! stdout bar
# list with filter
exec anydb -f test.db list b.r
exec anydb -f test.db list b.r -s
stdout bar
# list with -i filter
exec anydb -f test.db list -i mucha
stdout MUCHA
exec anydb -f test.db list -is mucha
stdout mucha
# get single entry
exec anydb -f test.db get color