Обсуждение: Re: Improve pg_sync_replication_slots() to wait for primary to advance
Hi, Ajin
Thanks for updating the patch.
On Mon, 27 Oct 2025 at 18:47, Ajin Cherian <itsajin@gmail.com> wrote:
> On Fri, Oct 24, 2025 at 8:29 PM shveta malik <shveta.malik@gmail.com> wrote:
>>
>> On Wed, Oct 22, 2025 at 10:25 AM Ajin Cherian <itsajin@gmail.com> wrote:
>> >
>> >
>> > I've modified the comments to reflect the new changes.
>> >
>> > attaching patch v18 with the above changes.
>> >
>>
>> Thanks for the patch. The test is still not clear. Can we please add
>> the test after the test of "Test logical failover slots corresponding
>> to different plugins" finishes instead of adding it in between?
>>
>
> I've rewritten the tests again to make this possible. Attaching v19
> which has the modified tap test.
Here are some comments on the new patch.
1. Given the existence of the foreach_ptr macro, we can switch the usage of
foreach to foreach_ptr.
diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c
index 1b78ffc5ff1..5db51407a82 100644
--- a/src/backend/replication/logical/slotsync.c
+++ b/src/backend/replication/logical/slotsync.c
@@ -872,7 +872,6 @@ fetch_remote_slots(WalReceiverConn *wrconn, List *slot_names)
     if (slot_names != NIL)
     {
-        ListCell   *lc;
         bool        first_slot = true;
         /*
@@ -880,10 +879,8 @@ fetch_remote_slots(WalReceiverConn *wrconn, List *slot_names)
          */
         appendStringInfoString(&query, " AND slot_name IN (");
-        foreach(lc, slot_names)
+        foreach_ptr(char, slot_name, slot_names)
         {
-            char *slot_name = (char *) lfirst(lc);
-
             if (!first_slot)
                 appendStringInfoString(&query, ", ");
@@ -1872,15 +1869,13 @@ static List *
 extract_slot_names(List *remote_slots)
 {
     List        *slot_names = NIL;
-    ListCell    *lc;
     MemoryContext oldcontext;
     /* Switch to long-lived TopMemoryContext to store slot names */
     oldcontext = MemoryContextSwitchTo(TopMemoryContext);
-    foreach(lc, remote_slots)
+    foreach_ptr(RemoteSlot, remote_slot, remote_slots)
     {
-        RemoteSlot *remote_slot = (RemoteSlot *) lfirst(lc);
         char       *slot_name;
         slot_name = pstrdup(remote_slot->name);
2. To append a single character, switch from appendStringInfoString() to the
more efficient appendStringInfoChar().
+         appendStringInfoString(&query, ")");
3. The query memory can be released immediately after walrcv_exec() because
there are no subsequent references.
@@ -895,6 +892,7 @@ fetch_remote_slots(WalReceiverConn *wrconn, List *slot_names)
     /* Execute the query */
     res = walrcv_exec(wrconn, query.data, SLOTSYNC_COLUMN_COUNT, slotRow);
+    pfree(query.data);
     if (res->status != WALRCV_OK_TUPLES)
         ereport(ERROR,
                 errmsg("could not fetch failover logical slots info from the primary server: %s",
@@ -975,7 +973,6 @@ fetch_remote_slots(WalReceiverConn *wrconn, List *slot_names)
     }
     walrcv_clear_result(res);
-    pfree(query.data);
     return remote_slot_list;
 }
>
> Fujitsu Australia
>
> [2. text/x-diff; v19-0001-Improve-initial-slot-synchronization-in-pg_sync_.patch]...
--
Regards,
Japin Li
ChengDu WenWu Information Technology Co., Ltd.
			
		On Mon, Oct 27, 2025 at 8:22 PM Japin Li <japinli@hotmail.com> wrote:
>
>
> Hi, Ajin
>
> Thanks for updating the patch.
>
> On Mon, 27 Oct 2025 at 18:47, Ajin Cherian <itsajin@gmail.com> wrote:
> > On Fri, Oct 24, 2025 at 8:29 PM shveta malik <shveta.malik@gmail.com> wrote:
> >>
> >> On Wed, Oct 22, 2025 at 10:25 AM Ajin Cherian <itsajin@gmail.com> wrote:
> >> >
> >> >
> >> > I've modified the comments to reflect the new changes.
> >> >
> >> > attaching patch v18 with the above changes.
> >> >
> >>
> >> Thanks for the patch. The test is still not clear. Can we please add
> >> the test after the test of "Test logical failover slots corresponding
> >> to different plugins" finishes instead of adding it in between?
> >>
> >
> > I've rewritten the tests again to make this possible. Attaching v19
> > which has the modified tap test.
>
> Here are some comments on the new patch.
>
> 1. Given the existence of the foreach_ptr macro, we can switch the usage of
> foreach to foreach_ptr.
>
> diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c
> index 1b78ffc5ff1..5db51407a82 100644
> --- a/src/backend/replication/logical/slotsync.c
> +++ b/src/backend/replication/logical/slotsync.c
> @@ -872,7 +872,6 @@ fetch_remote_slots(WalReceiverConn *wrconn, List *slot_names)
>
>         if (slot_names != NIL)
>         {
> -               ListCell   *lc;
>                 bool            first_slot = true;
>
>                 /*
> @@ -880,10 +879,8 @@ fetch_remote_slots(WalReceiverConn *wrconn, List *slot_names)
>                  */
>                 appendStringInfoString(&query, " AND slot_name IN (");
>
> -               foreach(lc, slot_names)
> +               foreach_ptr(char, slot_name, slot_names)
>                 {
> -                       char *slot_name = (char *) lfirst(lc);
> -
>                         if (!first_slot)
>                                 appendStringInfoString(&query, ", ");
>
> @@ -1872,15 +1869,13 @@ static List *
>  extract_slot_names(List *remote_slots)
>  {
>         List            *slot_names = NIL;
> -       ListCell        *lc;
>         MemoryContext oldcontext;
>
>         /* Switch to long-lived TopMemoryContext to store slot names */
>         oldcontext = MemoryContextSwitchTo(TopMemoryContext);
>
> -       foreach(lc, remote_slots)
> +       foreach_ptr(RemoteSlot, remote_slot, remote_slots)
>         {
> -               RemoteSlot *remote_slot = (RemoteSlot *) lfirst(lc);
>                 char       *slot_name;
>
>                 slot_name = pstrdup(remote_slot->name);
>
> 2. To append a single character, switch from appendStringInfoString() to the
> more efficient appendStringInfoChar().
>
> +               appendStringInfoString(&query, ")");
>
> 3. The query memory can be released immediately after walrcv_exec() because
> there are no subsequent references.
>
> @@ -895,6 +892,7 @@ fetch_remote_slots(WalReceiverConn *wrconn, List *slot_names)
>
>         /* Execute the query */
>         res = walrcv_exec(wrconn, query.data, SLOTSYNC_COLUMN_COUNT, slotRow);
> +       pfree(query.data);
>         if (res->status != WALRCV_OK_TUPLES)
>                 ereport(ERROR,
>                                 errmsg("could not fetch failover logical slots info from the primary server: %s",
> @@ -975,7 +973,6 @@ fetch_remote_slots(WalReceiverConn *wrconn, List *slot_names)
>         }
>
>         walrcv_clear_result(res);
> -       pfree(query.data);
>
>         return remote_slot_list;
>  }
>
Thanks for your review, Japin. Here's patch v20 addressing the comments.
regards,
Ajin Cherian
Fujitsu Australia
			
		Вложения
Hi Ajin,
I have reviewed v20 and got a few comments:
> On Oct 30, 2025, at 18:18, Ajin Cherian <itsajin@gmail.com> wrote:
>
> <v20-0001-Improve-initial-slot-synchronization-in-pg_sync_.patch>
1 - slotsync.c
```
+        if (slot_names)
+            list_free_deep(slot_names);
         /* Cleanup the synced temporary slots */
         ReplicationSlotCleanup(true);
@@ -1762,5 +2026,5 @@ SyncReplicationSlots(WalReceiverConn *wrconn)
         /* We are done with sync, so reset sync flag */
         reset_syncing_flag();
     }
-    PG_END_ENSURE_ERROR_CLEANUP(slotsync_failure_callback, PointerGetDatum(wrconn));
+    PG_END_ENSURE_ERROR_CLEANUP(slotsync_failure_callback, PointerGetDatum(&fparams));
```
I am afraid there is a risk of double memory free. slot_names has been assigned to fparams.slot_names within the for
loop, and it's freed after the loop. If something goes wrong and slotsync_failure_callback() is called, the function
will free fparams.slot_names again.
2 - slotsync.c
```
+            /*
+             * Fetch remote slot info for the given slot_names. If slot_names is NIL,
+             * fetch all failover-enabled slots. Note that we reuse slot_names from
+             * the first iteration; re-fetching all failover slots each time could
+             * cause an endless loop. Instead of reprocessing only the pending slots
+             * in each iteration, it's better to process all the slots received in
+             * the first iteration. This ensures that by the time we're done, all
+             * slots reflect the latest values.
+             */
+            remote_slots = fetch_remote_slots(wrconn, slot_names);
+
+            /* Attempt to synchronize slots */
+            some_slot_updated = synchronize_slots(wrconn, remote_slots,
+                                                  &slot_persistence_pending);
+
+            /*
+             * If slot_persistence_pending is true, extract slot names
+             * for future iterations (only needed if we haven't done it yet)
+             */
+            if (slot_names == NIL && slot_persistence_pending)
+            {
+                slot_names = extract_slot_names(remote_slots);
+
+                /* Update the failure structure so that it can be freed on error */
+                fparams.slot_names = slot_names;
+            }
```
I am thinking if that could be a problem. As you now call extract_slot_names() only in the first iteration, if a slot is
dropped, and a new slot comes with the same name, will the new slot be incorrectly synced?
Best regards,
--
Chao Li (Evan)
HighGo Software Co., Ltd.
https://www.highgo.com/
			
		On Thu, 30 Oct 2025 at 19:15, Chao Li <li.evan.chao@gmail.com> wrote:
> Hi Ajin,
>
> I have reviewed v20 and got a few comments:
>
>> On Oct 30, 2025, at 18:18, Ajin Cherian <itsajin@gmail.com> wrote:
>>
>> <v20-0001-Improve-initial-slot-synchronization-in-pg_sync_.patch>
>
> 1 - slotsync.c
> ```
> +        if (slot_names)
> +            list_free_deep(slot_names);
>
>          /* Cleanup the synced temporary slots */
>          ReplicationSlotCleanup(true);
> @@ -1762,5 +2026,5 @@ SyncReplicationSlots(WalReceiverConn *wrconn)
>          /* We are done with sync, so reset sync flag */
>          reset_syncing_flag();
>      }
> -    PG_END_ENSURE_ERROR_CLEANUP(slotsync_failure_callback, PointerGetDatum(wrconn));
> +    PG_END_ENSURE_ERROR_CLEANUP(slotsync_failure_callback, PointerGetDatum(&fparams));
> ```
>
> I am afraid there is a risk of double memory free. slot_names has been assigned to fparams.slot_names within the for
loop, and it's freed after the loop. If something goes wrong and slotsync_failure_callback() is called, the function
will free fparams.slot_names again.
>
Agreed.
Maybe we should set the fparams.slot_names to NIL immediately after freeing
the memory.
> 2 - slotsync.c
> ```
> +            /*
> +             * Fetch remote slot info for the given slot_names. If slot_names is NIL,
> +             * fetch all failover-enabled slots. Note that we reuse slot_names from
> +             * the first iteration; re-fetching all failover slots each time could
> +             * cause an endless loop. Instead of reprocessing only the pending slots
> +             * in each iteration, it's better to process all the slots received in
> +             * the first iteration. This ensures that by the time we're done, all
> +             * slots reflect the latest values.
> +             */
> +            remote_slots = fetch_remote_slots(wrconn, slot_names);
> +
> +            /* Attempt to synchronize slots */
> +            some_slot_updated = synchronize_slots(wrconn, remote_slots,
> +                                                  &slot_persistence_pending);
> +
> +            /*
> +             * If slot_persistence_pending is true, extract slot names
> +             * for future iterations (only needed if we haven't done it yet)
> +             */
> +            if (slot_names == NIL && slot_persistence_pending)
> +            {
> +                slot_names = extract_slot_names(remote_slots);
> +
> +                /* Update the failure structure so that it can be freed on error */
> +                fparams.slot_names = slot_names;
> +            }
> ```
>
> I am thinking if that could be a problem. As you now call extract_slot_names() only in the first iteration, if a slot is
dropped, and a new slot comes with the same name, will the new slot be incorrectly synced?
>
The slot name alone is insufficient to distinguish between the old and new
slots.  In this case, the new slot state will overwrite the old.  I see no
harm in this behavior, but please confirm if this is the desired behavior.
--
Regards,
Japin Li
ChengDu WenWu Information Technology Co., Ltd.
			
		On Thu, Oct 30, 2025 at 3:48 PM Ajin Cherian <itsajin@gmail.com> wrote: > > > Thanks for your review, Japin. Here's patch v20 addressing the comments. > Thank You for the patch. Please find a few comment son test: 1) +# until the slot becomes sync-ready (when the standby catches up to the +# slot's restart_lsn). I think it should be 'when the primary server catches up' or 'when the remote slot catches up with the locally reserved position.' 2) +# Attempt to synchronize slots using API. This will initially fail because +# the slot is not yet sync-ready (standby hasn't caught up to slot's restart_lsn), +# but the API will wait and retry. Call the API in a background process. a) 'This will initially fail ' seems like the API will give an error, which is not the case b) 'standby hasn't caught up to slot's restart_lsn' is not correct. We can rephrase to: # Attempt to synchronize slots using the API. The API will continue retrying synchronization until the remote slot catches up with the locally reserved position. 3) +# Enable the Subscription, so that the slot catches up slot --> remote slot 4) +# Create xl_running_xacts records on the primary for which the standby is waiting Shall we rephrase to below or anything better if you have?: Create xl_running_xacts on the primary to speed up restart_lsn advancement. 5) +# Confirm that the logical failover slot is created on the standby and is +# flagged as 'synced' Suggestion: Verify that the logical failover slot is created on the standby, marked as 'synced', and persisted. (It is important to mention persisted because even temporary slot is marked as synced) thanks Shveta
On Fri, Oct 31, 2025 at 11:04 AM shveta malik <shveta.malik@gmail.com> wrote:
>
> On Thu, Oct 30, 2025 at 3:48 PM Ajin Cherian <itsajin@gmail.com> wrote:
> >
> >
> > Thanks for your review, Japin. Here's patch v20 addressing the comments.
> >
>
> Thank You for the patch. Please find a few comments on the test:
>
>
> 1)
> +# until the slot becomes sync-ready (when the standby catches up to the
> +# slot's restart_lsn).
>
> I think it should be 'when the primary server catches up' or 'when the
> remote slot catches up with the locally reserved position.'
>
> 2)
> +# Attempt to synchronize slots using API. This will initially fail because
> +# the slot is not yet sync-ready (standby hasn't caught up to slot's
> restart_lsn),
> +# but the API will wait and retry. Call the API in a background process.
>
> a)
> 'This will initially fail ' seems like the API will give an error,
> which is not the case
>
> b) 'standby hasn't caught up to slot's restart_lsn' is not correct.
>
> We can rephrase to:
> # Attempt to synchronize slots using the API. The API will continue
> retrying synchronization until the remote slot catches up with the
> locally reserved position.
>
> 3)
> +# Enable the Subscription, so that the slot catches up
>
> slot --> remote slot
>
> 4)
> +# Create xl_running_xacts records on the primary for which the
> standby is waiting
>
> Shall we rephrase to below or anything better if you have?:
> Create xl_running_xacts on the primary to speed up restart_lsn advancement.
>
> 5)
> +# Confirm that the logical failover slot is created on the standby and is
> +# flagged as 'synced'
>
> Suggestion:
> Verify that the logical failover slot is created on the standby,
> marked as 'synced', and persisted.
>
> (It is important to mention persisted because even temporary slot is
> marked as synced)
>
Shall we remove this change as it does not belong to the current patch
directly? I think it was a suggestion earlier, but we shall remove it.
6)
-# Confirm the synced slot 'lsub1_slot' is retained on the new primary
+# Confirm that the synced slots 'lsub1_slot' and 'snap_test_slot' are
retained on the new primary
 is( $standby1->safe_psql(
  'postgres',
  q{SELECT count(*) = 2 FROM pg_replication_slots WHERE slot_name IN
('lsub1_slot', 'snap_test_slot') AND synced AND NOT temporary;}
+
thanks
Shveta
			
		On Thu, Oct 30, 2025 at 10:16 PM Chao Li <li.evan.chao@gmail.com> wrote:
>
> Hi Ajin,
>
> I have reviewed v20 and got a few comments:
>
> > On Oct 30, 2025, at 18:18, Ajin Cherian <itsajin@gmail.com> wrote:
> >
> > <v20-0001-Improve-initial-slot-synchronization-in-pg_sync_.patch>
>
> 1 - slotsync.c
> ```
> +               if (slot_names)
> +                       list_free_deep(slot_names);
>
>                 /* Cleanup the synced temporary slots */
>                 ReplicationSlotCleanup(true);
> @@ -1762,5 +2026,5 @@ SyncReplicationSlots(WalReceiverConn *wrconn)
>                 /* We are done with sync, so reset sync flag */
>                 reset_syncing_flag();
>         }
> -       PG_END_ENSURE_ERROR_CLEANUP(slotsync_failure_callback, PointerGetDatum(wrconn));
> +       PG_END_ENSURE_ERROR_CLEANUP(slotsync_failure_callback, PointerGetDatum(&fparams));
> ```
>
> I am afraid there is a risk of double memory free. slot_names has been assigned to fparams.slot_names within the for
loop, and it's freed after the loop. If something goes wrong and slotsync_failure_callback() is called, the function
will free fparams.slot_names again.
>
Yes, good catch. I have changed to set fparams.slot_names to NIL after
freeing it, so that it isn't freed in slotsync_failure_callback().
> 2 - slotsync.c
> ```
> +                       /*
> +                        * Fetch remote slot info for the given slot_names. If slot_names is NIL,
> +                        * fetch all failover-enabled slots. Note that we reuse slot_names from
> +                        * the first iteration; re-fetching all failover slots each time could
> +                        * cause an endless loop. Instead of reprocessing only the pending slots
> +                        * in each iteration, it's better to process all the slots received in
> +                        * the first iteration. This ensures that by the time we're done, all
> +                        * slots reflect the latest values.
> +                        */
> +                       remote_slots = fetch_remote_slots(wrconn, slot_names);
> +
> +                       /* Attempt to synchronize slots */
> +                       some_slot_updated = synchronize_slots(wrconn, remote_slots,
> +
&slot_persistence_pending);
> +
> +                       /*
> +                        * If slot_persistence_pending is true, extract slot names
> +                        * for future iterations (only needed if we haven't done it yet)
> +                        */
> +                       if (slot_names == NIL && slot_persistence_pending)
> +                       {
> +                               slot_names = extract_slot_names(remote_slots);
> +
> +                               /* Update the failure structure so that it can be freed on error */
> +                               fparams.slot_names = slot_names;
> +                       }
> ```
>
> I am thinking if that could be a problem. As you now call extract_slot_names() only in the first iteration, if a slot is
dropped, and a new slot comes with the same name, will the new slot be incorrectly synced?
It doesn't matter, because the new slot will anyway have a later
restart_lsn and xmin, and all other attributes of the slot are also
updated as part of the sync. So, the old slot on the standby will
resemble the new slot on the primary.
On Fri, Oct 31, 2025 at 3:42 PM Japin Li <japinli@hotmail.com> wrote:
>
> Thanks for updating the patch.  Here are some comments on v20.
>
> 1. Since the content is unchanged, no modification is needed here.
>
> -                * We do not drop the slot because the restart_lsn can be ahead of the
> -                * current location when recreating the slot in the next cycle. It may
> -                * take more time to create such a slot. Therefore, we keep this slot
> -                * and attempt the synchronization in the next cycle.
> +                * We do not drop the slot because the restart_lsn can be
> +                * ahead of the current location when recreating the slot in
> +                * the next cycle. It may take more time to create such a
> +                * slot. Therefore, we keep this slot and attempt the
> +                * synchronization in the next cycle.
>
Changed.
> 2. Could we align the parameter comment style for synchronize_slots() and
> fetch_remote_slots() for better consistency?
>
Fixed.
> 3. Is this redundant? It was already initialized to false during declaration.
>
> +                       /* Reset flag before every iteration */
> +                       slot_persistence_pending = false;
>
Removed.
> 4. A minor nitpick.  The opening brace should be on a new line for style
> consistency.
>
> +                       if (!IsTransactionState()) {
> +                               StartTransactionCommand();
> +                               started_tx = true;
> +                       }
>
Fixed.
> 5. Given that fparams.slot_names is a list, I suggest we replace NULL with NIL
> for type consistency.
>
> +       fparams.slot_names = NULL;
>
Changed.
On Fri, Oct 31, 2025 at 4:34 PM shveta malik <shveta.malik@gmail.com> wrote:
>
> On Thu, Oct 30, 2025 at 3:48 PM Ajin Cherian <itsajin@gmail.com> wrote:
> >
> >
> > Thanks for your review, Japin. Here's patch v20 addressing the comments.
> >
>
> Thank You for the patch. Please find a few comments on the test:
>
>
> 1)
> +# until the slot becomes sync-ready (when the standby catches up to the
> +# slot's restart_lsn).
>
> I think it should be 'when the primary server catches up' or 'when the
> remote slot catches up with the locally reserved position.'
>
Changed.
> 2)
> +# Attempt to synchronize slots using API. This will initially fail because
> +# the slot is not yet sync-ready (standby hasn't caught up to slot's
> restart_lsn),
> +# but the API will wait and retry. Call the API in a background process.
>
> a)
> 'This will initially fail ' seems like the API will give an error,
> which is not the case
>
> b) 'standby hasn't caught up to slot's restart_lsn' is not correct.
>
> We can rephrase to:
> # Attempt to synchronize slots using the API. The API will continue
> retrying synchronization until the remote slot catches up with the
> locally reserved position.
>
changed accordingly.
> 3)
> +# Enable the Subscription, so that the slot catches up
>
> slot --> remote slot
>
> 4)
> +# Create xl_running_xacts records on the primary for which the
> standby is waiting
>
> Shall we rephrase to below or anything better if you have?:
> Create xl_running_xacts on the primary to speed up restart_lsn advancement.
>
> 5)
> +# Confirm that the logical failover slot is created on the standby and is
> +# flagged as 'synced'
>
> Suggestion:
> Verify that the logical failover slot is created on the standby,
> marked as 'synced', and persisted.
>
> (It is important to mention persisted because even temporary slot is
> marked as synced)
>
changed as recommended.
I have addressed the above comments in patch v21.
regards,
Ajin Cherian
Fujitsu Australia
			
		Вложения
>
> I have addressed the above comments in patch v21.
>
Thank You. Please find a few comments:
1)
+ fparams.slot_names = slot_names = NIL;
I think it is not needed to set slot_names to NIL.
2)
-    WAIT_EVENT_REPLICATION_SLOTSYNC_MAIN);
+    WAIT_EVENT_REPLICATION_SLOTSYNC_PRIMARY_CATCHUP);
The new name does not seem appropriate. For the slotsync-worker case,
even when the primary is not behind, the worker still waits but it is
not waiting for primary to catch-up. I could not find a better name
except the original one 'WAIT_EVENT_REPLICATION_SLOTSYNC_MAIN'. We can
change the explanation to :
"Waiting in main loop of slot sync worker and slot sync API."
Or
"Waiting in main loop of slot synchronization."
If anyone has any better name suggestions, we can consider changing.
3)
+# Attempt to synchronize slots using API. The API will continue retrying
+# synchronization until the remote slot catches up with the locally reserved
+# position. The API will not return until this happens, to be able to make
+# further calls, call the API in a background process.
Shall we remove 'with the locally reserved position', it’s already
explained in the test header and the comment is good enough even
without it.
4)
+# Confirm log that the slot has been synced after becoming sync-ready.
Shall we just say:
Confirm from the log that the slot is sync-ready now.
5)
 # Synchronize the primary server slots to the standby.
 $standby1->safe_psql('postgres', "SELECT pg_sync_replication_slots();");
@@ -945,6 +1007,7 @@ $subscriber1->safe_psql('postgres',
 is( $standby1->safe_psql(
  'postgres',
  q{SELECT count(*) = 2 FROM pg_replication_slots WHERE slot_name IN
('lsub1_slot', 'snap_test_slot') AND synced AND NOT temporary;}
+
  ),
Redundant change.
thanks
Shveta