use strict;
use Devel::GlobalDestruction;
+use File::KDBX::Constants qw(:bool);
use File::KDBX::Error;
use File::KDBX::Util qw(:uuid);
use Hash::Util::FieldHash qw(fieldhashes);
-use Ref::Util qw(is_arrayref is_plain_hashref is_ref);
+use List::Util qw(any first);
+use Ref::Util qw(is_arrayref is_plain_arrayref is_plain_hashref is_ref);
use Scalar::Util qw(blessed weaken);
use namespace::clean;
our $VERSION = '999.999'; # VERSION
-fieldhashes \my (%KDBX, %PARENT);
+fieldhashes \my (%KDBX, %PARENT, %TXNS, %REFS, %SIGNALS);
=method new
my $self = bless $data // {}, $class;
$self->init(%args);
- $self->_set_default_attributes if !$data;
+ $self->_set_nonlazy_attributes if !$data;
return $self;
}
-sub _set_default_attributes { die 'Not implemented' }
+sub _set_nonlazy_attributes { die 'Not implemented' }
=method init
=method clone
- $object_copy = $object->clone;
+ $object_copy = $object->clone(%options);
$object_copy = File::KDBX::Object->new($object);
-Make a clone of an object. By default the clone is indeed an exact copy that is associated with the same
-database but not actually included in the object tree (i.e. it has no parent). Some options are allowed to
-get different effects:
+Make a clone of an object. By default the clone is indeed an exact copy that is connected to the same database
+but not actually included in the object tree (i.e. it has no parent group). Some options are allowed to get
+different effects:
=for :list
* C<new_uuid> - If set, generate a new UUID for the copy (default: false)
if ($args{relabel} and my $label = $self->label) {
$copy->label("$label - Copy");
}
- if ($args{parent} and my $parent = $self->parent) {
+ if ($args{parent} and my $parent = $self->group) {
$parent->add_object($copy);
}
local $CLONE{history} = 1;
local $CLONE{reference_password} = 0;
local $CLONE{reference_username} = 0;
+ # Clone only the entry's data and manually bless to avoid infinite recursion.
bless Storable::dclone({%$copy}), 'File::KDBX::Entry';
};
- my $txn = $self->begin_work($clone_obj);
+ my $txn = $self->begin_work(snapshot => $clone_obj);
if ($CLONE{reference_password}) {
$self->password("{REF:P\@I:$uuid}");
}
}
$self->uuid(generate_uuid) if $CLONE{new_uuid};
}
+
+ # Dualvars aren't cloned as dualvars, so dualify the icon.
+ $self->icon_id($self->{icon_id}) if defined $self->{icon_id};
}
=attr kdbx
$kdbx = $object->kdbx;
$object->kdbx($kdbx);
-Get or set the L<File::KDBX> instance associated with this object.
+Get or set the L<File::KDBX> instance connected with this object. Throws if the object is disconnected. Other
+object methods might only work if the object is connected to a database and so they might also throw if the
+object is disconnected. If you're not sure if an object is connected, try L</is_connected>.
=cut
delete $KDBX{$self};
}
}
- $KDBX{$self} or throw 'Object is disassociated from a KDBX database', object => $self;
+ $KDBX{$self} or throw 'Object is disconnected', object => $self;
+}
+
+=method is_connected
+
+ $bool = $object->is_connected;
+
+Determine whether or not an object is connected to a database.
+
+=cut
+
+sub is_connected {
+    my $self = shift;
+    # kdbx() throws when the object is disconnected; trap the exception and
+    # collapse the result to a plain boolean.
+    return !!eval { $self->kdbx };
}
=method id
=method group
-=method parent
+ $parent_group = $object->group;
+ $object->group($parent_group);
- $group = $object->group;
- # OR equivalently
- $group = $object->parent;
-
-Get the parent group to which an object belongs or C<undef> if it belongs to no group.
+Get or set the parent group to which an object belongs or C<undef> if it belongs to no group.
=cut
sub group {
    my $self = shift;
-    my $addr = Hash::Util::FieldHash::id($self);
+
+    if (my $new_group = shift) {
+        my $old_group = $self->group;
+        # Setting the same parent is a no-op. Guard against an undefined
+        # $old_group (object currently has no parent) before comparing
+        # addresses, otherwise id(undef) causes an uninitialized-value warning.
+        return $new_group if $old_group
+            && Hash::Util::FieldHash::id($old_group) == Hash::Util::FieldHash::id($new_group);
+        # move to a new parent
+        $self->remove(signal => 0) if $old_group;
+        $self->location_changed('now');
+        $new_group->add_object($self);
+    }
+
+    my $id = Hash::Util::FieldHash::id($self);
    if (my $group = $PARENT{$self}) {
        my $method = $self->_parent_container;
-        for my $object (@{$group->$method}) {
-            return $group if $addr == Hash::Util::FieldHash::id($object);
-        }
+        return $group if first { $id == Hash::Util::FieldHash::id($_) } @{$group->$method};
        delete $PARENT{$self};
    }
    # always get lineage from root to leaf because the other way requires parent, so it would be recursive
    return $group;
}
-sub parent { shift->group(@_) }
-
sub _set_group {
my $self = shift;
if (my $parent = shift) {
# try leaf to root
my @path;
- my $o = $self;
- while ($o = $o->parent) {
- unshift @path, $o;
- last if $base_addr == Hash::Util::FieldHash::id($o);
+ my $object = $self;
+ while ($object = $object->group) {
+ unshift @path, $object;
+ last if $base_addr == Hash::Util::FieldHash::id($object);
}
return \@path if @path && ($base_addr == Hash::Util::FieldHash::id($path[0]) || $path[0]->is_root);
=method remove
- $object = $object->remove;
+ $object = $object->remove(%options);
+
+Remove an object from its parent. If the object is a group, all contained objects stay with the object and so
+are removed as well, just like cutting off a branch takes the leaves with it. Options:
-Remove the object from the database. If the object is a group, all contained objects are removed as well.
+=for :list
+* C<signal> Whether or not to signal the removal to the connected database (default: true)
=cut
sub remove {
    my $self = shift;
-    my $parent = $self->parent;
-    $parent->remove_object($self) if $parent;
+    my $parent = $self->group;
+    # Pass through caller options (e.g. signal => 0) to the parent's remover.
+    $parent->remove_object($self, @_) if $parent;
+    # Clear the cached parent association regardless of whether there was one.
+    $self->_set_group(undef);
    return $self;
}
+=method recycle
+
+ $object = $object->recycle;
+
+Remove an object from its parent and add it to the connected database's recycle bin group.
+
+=cut
+
+sub recycle {
+    my $self = shift;
+    # Re-parenting the object to the connected database's recycle bin group
+    # removes it from its current parent and adds it to the bin (see group).
+    return $self->group($self->kdbx->recycle_bin);
+}
+
+=method recycle_or_remove
+
+ $object = $object->recycle_or_remove;
+
+Recycle or remove an object, depending on the connected database's L<File::KDBX/recycle_bin_enabled>. If the
+object is not connected to a database or is already in the recycle bin, remove it.
+
+=cut
+
+sub recycle_or_remove {
+    my $self = shift;
+    # kdbx() throws when disconnected; treat that as "no database".
+    my $kdbx = eval { $self->kdbx };
+    # Recycle only when connected to a database with the recycle bin enabled
+    # and the object is not already in the bin; otherwise remove it outright.
+    if ($kdbx && $kdbx->recycle_bin_enabled && !$self->is_recycled) {
+        $self->recycle;
+    }
+    else {
+        $self->remove;
+    }
+}
+
+=method is_recycled
+
+ $bool = $object->is_recycled;
+
+Get whether or not an object is in a recycle bin.
+
+=cut
+
+sub is_recycled {
+    my $self = shift;
+    # A disconnected object cannot be in a recycle bin.
+    eval { $self->kdbx } or return FALSE;
+    # Recycled means some ancestor group in the object's lineage is the bin.
+    return !!($self->group && any { $_->is_recycle_bin } @{$self->lineage});
+}
+
+##############################################################################
+
=method tag_list
@tags = $entry->tag_list;
$object->custom_data(%data);
$object->custom_data(key => $value, %data);
-Get and set custom data. Custom data is metadata associated with an object.
+Get and set custom data. Custom data is metadata associated with an object. It is a set of key-value pairs
+used to store arbitrary data, usually used by software like plug-ins to keep track of state rather than by end
+users.
Each data item can have a few attributes associated with it.
return $data->{value};
}
+##############################################################################
+
+=method begin_work
+
+ $txn = $object->begin_work(%options);
+ $object->begin_work(%options);
+
+Begin a new transaction. Returns a L<File::KDBX::Transaction> object that can be scoped to ensure a rollback
+occurs if exceptions are thrown. Alternatively, if called in void context, there will be no
+B<File::KDBX::Transaction> and it is instead your responsibility to call L</commit> or L</rollback> as
+appropriate. It is undefined behavior to call these if a B<File::KDBX::Transaction> exists. Recursive
+transactions are allowed.
+
+Signals created during a transaction are delayed until all transactions are resolved. If the outermost
+transaction is committed, then the signals are de-duplicated and delivered. Otherwise the signals are dropped.
+This means that the KDBX database will not fix broken references or mark itself dirty until after the
+transaction is committed.
+
+How it works: With the beginning of a transaction, a snapshot of the object is created. In the event of
+a rollback, the object's data is replaced with data from the snapshot.
+
+By default, the snapshot is shallow (i.e. does not include subgroups, entries or historical entries). This
+means that only modifications to the object itself (its data, fields, strings, etc.) are atomic; modifications
+to subgroups etc., including adding or removing items, are auto-committed instantly and will persist regardless
+of the result of the pending transaction. You can override this for groups, entries and history independently
+using options:
+
+=for :list
+* C<entries> - If set, snapshot entries within a group, deeply (default: false)
+* C<groups> - If set, snapshot subgroups within a group, deeply (default: false)
+* C<history> - If set, snapshot historical entries within an entry (default: false)
+
+For example, if you begin a transaction on a group object using the C<entries> option, like this:
+
+ $group->begin_work(entries => 1);
+
+Then if you modify any of the group's entries OR add new entries OR delete entries, all of that will be undone
+if the transaction is rolled back. With a default-configured transaction, however, changes to entries are kept
+even if the transaction is rolled back.
+
+=cut
+
+sub begin_work {
+    my $self = shift;
+
+    # In non-void context, hand back a transaction object whose lifetime
+    # manages commit/rollback on the caller's behalf.
+    if (defined wantarray) {
+        require File::KDBX::Transaction;
+        return File::KDBX::Transaction->new($self, @_);
+    }
+
+    my %args = @_;
+    # The snapshot is a clone of the object. Unless a deep snapshot was
+    # requested, alias the original's entries/groups/history structures into
+    # the clone so changes to them are excluded from the transaction.
+    my $orig = $args{snapshot} // do {
+        my $c = $self->clone(
+            entries => $args{entries} // 0,
+            groups => $args{groups} // 0,
+            history => $args{history} // 0,
+        );
+        $c->{entries} = $self->{entries} if !$args{entries};
+        $c->{groups} = $self->{groups} if !$args{groups};
+        $c->{history} = $self->{history} if !$args{history};
+        $c;
+    };
+
+    # Remember the object's internal hashrefs/arrayrefs keyed by the snapshot
+    # so a rollback can restore data into the very same structures.
+    my $id = Hash::Util::FieldHash::id($orig);
+    _save_references($id, $self, $orig);
+
+    # Start a fresh signal queue for this transaction.
+    $self->_signal_begin_work;
+
+    push @{$self->_txns}, $orig;
+}
+
+=method commit
+
+ $object->commit;
+
+Commit a transaction, making updates to C<$object> permanent. Returns itself to allow method chaining.
+
+=cut
+
+sub commit {
+    my $self = shift;
+    # Discard the innermost snapshot; nothing to do without a pending txn.
+    my $orig = pop @{$self->_txns} or return $self;
+    # Notify the subclass hook that a commit occurred.
+    $self->_commit($orig);
+    my $signals = $self->_signal_commit;
+    # Deliver queued signals only once the outermost transaction resolves.
+    $self->_signal_send($signals) if !$self->_in_txn;
+    return $self;
+}
+
+=method rollback
+
+ $object->rollback;
+
+Roll back the most recent transaction, throwing away any updates to C<$object> made since the transaction
+began. Returns itself to allow method chaining.
+
+=cut
+
+sub rollback {
+    my $self = shift;
+
+    # Nothing to do if there is no pending transaction.
+    my $orig = pop @{$self->_txns} or return $self;
+
+    # Copy data from the snapshot back into the object's original structures.
+    my $id = Hash::Util::FieldHash::id($orig);
+    _restore_references($id, $orig);
+
+    # Drop any signals queued during the aborted transaction.
+    $self->_signal_rollback;
+
+    return $self;
+}
+
+# Get whether or not there is at least one pending transaction.
+sub _in_txn { scalar @{$_[0]->_txns} }
+
+# Get an array ref of pending transactions (snapshots, oldest first).
+sub _txns { $TXNS{$_[0]} //= [] }
+
+# The _commit hook notifies subclasses that a commit has occurred.
+sub _commit { die 'Not implemented' }
+
+# Get a reference to an object that represents an object's committed state. If there is no pending
+# transaction, this is just $self. If there is a transaction, this is the snapshot taken before the earliest
+# pending transaction began. This method is private because it provides direct access to the actual snapshot.
+# It is important that the snapshot not be changed or a rollback would roll back to an altered state.
+# This is used by File::KDBX::Dumper::XML so as to not dump uncommitted changes.
+sub _committed {
+    my $self = shift;
+    my ($orig) = @{$self->_txns};
+    return $orig // $self;
+}
+
+# In addition to cloning an object when beginning work, we also keep track of its hashrefs and arrayrefs
+# internally so that we can restore to the very same structures in the case of a rollback.
+sub _save_references {
+    my $id = shift;
+    my $self = shift;
+    my $orig = shift;
+
+    if (is_plain_arrayref($orig)) {
+        # Recurse element-wise, pairing the live object's items with the snapshot's.
+        for (my $i = 0; $i < @$orig; ++$i) {
+            _save_references($id, $self->[$i], $orig->[$i]);
+        }
+        $REFS{$id}{Hash::Util::FieldHash::id($orig)} = $self;
+    }
+    elsif (is_plain_hashref($orig) || (blessed $orig && $orig->isa(__PACKAGE__))) {
+        # Plain hashes and contained objects (blessed hashes) recurse by key.
+        for my $key (keys %$orig) {
+            _save_references($id, $self->{$key}, $orig->{$key});
+        }
+        $REFS{$id}{Hash::Util::FieldHash::id($orig)} = $self;
+    }
+}
+
+# During a rollback, copy data from the snapshot back into the original internal structures.
+sub _restore_references {
+    my $id = shift;
+    my $orig = shift // return;
+    # Look up the live structure saved for this snapshot node; if none was
+    # saved (e.g. a non-ref value), the snapshot value itself is returned.
+    my $self = delete $REFS{$id}{Hash::Util::FieldHash::id($orig) // ''} // return $orig;
+
+    if (is_plain_arrayref($orig)) {
+        @$self = map { _restore_references($id, $_) } @$orig;
+    }
+    elsif (is_plain_hashref($orig) || (blessed $orig && $orig->isa(__PACKAGE__))) {
+        for my $key (keys %$orig) {
+            # next if is_ref($orig->{$key}) &&
+            # (Hash::Util::FieldHash::id($self->{$key}) // 0) == Hash::Util::FieldHash::id($orig->{$key});
+            $self->{$key} = _restore_references($id, $orig->{$key});
+        }
+    }
+
+    return $self;
+}
+
+##############################################################################
+
+# Queue or deliver a signal of the given type. Per the begin_work contract,
+# signals raised during a transaction are delayed: they are queued on the
+# innermost transaction's frame and delivered (de-duplicated) only if the
+# outermost transaction commits. Without the else, the signal would be BOTH
+# queued AND sent immediately, causing duplicate delivery on commit.
+sub _signal {
+    my $self = shift;
+    my $type = shift;
+
+    if ($self->_in_txn) {
+        my $stack = $self->_signal_stack;
+        my $queue = $stack->[-1];
+        push @$queue, [$type, @_];
+    }
+    else {
+        # Not in a transaction, so deliver the signal immediately.
+        $self->_signal_send([[$type, @_]]);
+    }
+
+    return $self;
+}
+
+# Get the per-object stack of signal queues, one queue per pending transaction.
+sub _signal_stack { $SIGNALS{$_[0]} //= [] }
+
+# Push a fresh, empty signal queue for a transaction that just began.
+sub _signal_begin_work {
+    my $self = shift;
+    push @{$self->_signal_stack}, [];
+}
+
+# Merge the committed transaction's queued signals into the enclosing
+# transaction's queue (or a fresh list at the outermost level) and return it.
+sub _signal_commit {
+    my $self = shift;
+    my $signals = pop @{$self->_signal_stack};
+    my $previous = $self->_signal_stack->[-1] // [];
+    push @$previous, @$signals;
+    return $previous;
+}
+
+# Drop the signals queued during the aborted transaction.
+sub _signal_rollback {
+    my $self = shift;
+    pop @{$self->_signal_stack};
+}
+
+sub _signal_send {
+    my $self = shift;
+    my $signals = shift // [];
+
+    # A disconnected object has no database to deliver signals to.
+    my $kdbx = $KDBX{$self} or return;
+
+    # de-duplicate, keeping the most recent signal for each type
+    my %seen;
+    my @signals = grep { !$seen{$_->[0]}++ } reverse @$signals;
+
+    # Deliver the surviving signals in their original (chronological) order.
+    for my $sig (reverse @signals) {
+        $kdbx->_handle_signal($self, @$sig);
+    }
+}
+
+##############################################################################
+
sub _wrap_group {
my $self = shift;
my $group = shift;
There is some functionality shared by both types of objects, and that's what this class provides.
+Each object can be connected with a L<File::KDBX> database or be disconnected. A disconnected object exists in
+memory but will not be persisted when dumping a database. It is also possible for an object to be connected
+with a database but not be part of the object tree (i.e. is not the root group or any subgroup or entry).
+A disconnected object or an object not part of the object tree of a database can be added to a database using
+one of:
+
+=for :list
+* L<File::KDBX/add_entry>
+* L<File::KDBX/add_group>
+* L<File::KDBX::Group/add_entry>
+* L<File::KDBX::Group/add_group>
+* L<File::KDBX::Entry/add_historical_entry>
+
+It is possible to copy or move objects between databases, but B<DO NOT> include the same object in more
+than one database at once or there could be some strange aliasing effects (i.e. changes in one database might
+affect another in unexpected ways). This could lead to difficult-to-debug problems. It is similarly not safe
+or valid to add the same object multiple times to the same database. For example:
+
+    my $entry = File::KDBX::Entry->new(title => 'Whatever');
+
+ # DO NOT DO THIS:
+ $kdbx->add_entry($entry);
+ $another_kdbx->add_entry($entry);
+
+ # DO NOT DO THIS:
+ $kdbx->add_entry($entry);
+ $kdbx->add_entry($entry); # again
+
+Instead, do this:
+
+ # Copy an entry to multiple databases:
+ $kdbx->add_entry($entry);
+ $another_kdbx->add_entry($entry->clone);
+
+ # OR move an existing entry from one database to another:
+ $another_kdbx->add_entry($entry->remove);
+
+=attr uuid
+
+128-bit UUID identifying the object within the connected database.
+
+=attr icon_id
+
+Integer representing a default icon. See L<File::KDBX::Constants/":icon"> for valid values.
+
+=attr custom_icon_uuid
+
+128-bit UUID identifying a custom icon within the connected database.
+
+=attr tags
+
+Text string with arbitrary tags which can be used to build a taxonomy.
+
+=attr previous_parent_group
+
+128-bit UUID identifying a group within the connected database that previously contained the object.
+
+=attr last_modification_time
+
+Date and time when the entry was last modified.
+
+=attr creation_time
+
+Date and time when the entry was created.
+
+=attr last_access_time
+
+Date and time when the entry was last accessed.
+
+=attr expiry_time
+
+Date and time when the entry expired or will expire.
+
+=attr expires
+
+Boolean value indicating whether or not an entry is expired.
+
+=attr usage_count
+
+The number of times an entry has been used, which typically means how many times the B<Password> string has
+been accessed.
+
+=attr location_changed
+
+Date and time when the entry was last moved to a different parent group.
+
=cut