Re: Analyze on large changes...

From: Bruce Momjian
Subject: Re: Analyze on large changes...
Msg-id: 200206112146.g5BLk5719228@candle.pha.pa.us
In reply to: Re: Analyze on large changes...  (Tom Lane <tgl@sss.pgh.pa.us>)
Responses: Re: Analyze on large changes...  (Bruce Momjian <pgman@candle.pha.pa.us>)
List: pgsql-hackers
Tom Lane wrote:
> I tried to repeat this:
>
> regression=# begin;
> BEGIN
> regression=# create table foo (f1 int);
> CREATE
> regression=# insert into foo [ ... some data ... ]
>
> regression=# analyze foo;
> ERROR:  ANALYZE cannot run inside a BEGIN/END block
>
> This seems a tad silly; I can't see any reason why ANALYZE couldn't be
> done inside a BEGIN block.  I think this is just a hangover from
> ANALYZE's origins as part of VACUUM.  Can anyone see a reason not to
> allow it?

The following patch allows ANALYZE to be run inside a transaction block.
VACUUM and VACUUM ANALYZE still cannot be run inside a transaction block.
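
With the patch applied, a session should go roughly like this (continuing
Tom's example; command tags shown as I would expect them, not copied from
an actual run), while VACUUM keeps the old error:

regression=# begin;
BEGIN
regression=# analyze foo;
ANALYZE
regression=# commit;
COMMIT
regression=# begin;
BEGIN
regression=# vacuum foo;
ERROR:  VACUUM cannot run inside a BEGIN/END block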

--
  Bruce Momjian                        |  http://candle.pha.pa.us
  pgman@candle.pha.pa.us               |  (610) 853-3000
  +  If your life is a hard drive,     |  830 Blythe Avenue
  +  Christ can be your backup.        |  Drexel Hill, Pennsylvania 19026
Index: src/backend/commands/analyze.c
===================================================================
RCS file: /cvsroot/pgsql/src/backend/commands/analyze.c,v
retrieving revision 1.35
diff -c -r1.35 analyze.c
*** src/backend/commands/analyze.c    24 May 2002 18:57:55 -0000    1.35
--- src/backend/commands/analyze.c    11 Jun 2002 21:38:51 -0000
***************
*** 156,170 ****
          elevel = DEBUG1;

      /*
-      * Begin a transaction for analyzing this relation.
-      *
-      * Note: All memory allocated during ANALYZE will live in
-      * TransactionCommandContext or a subcontext thereof, so it will all
-      * be released by transaction commit at the end of this routine.
-      */
-     StartTransactionCommand();
-
-     /*
       * Check for user-requested abort.    Note we want this to be inside a
       * transaction, so xact.c doesn't issue useless WARNING.
       */
--- 156,161 ----
***************
*** 177,186 ****
      if (!SearchSysCacheExists(RELOID,
                                ObjectIdGetDatum(relid),
                                0, 0, 0))
-     {
-         CommitTransactionCommand();
          return;
-     }

      /*
       * Open the class, getting only a read lock on it, and check
--- 168,174 ----
***************
*** 196,202 ****
              elog(WARNING, "Skipping \"%s\" --- only table or database owner can ANALYZE it",
                   RelationGetRelationName(onerel));
          relation_close(onerel, AccessShareLock);
-         CommitTransactionCommand();
          return;
      }

--- 184,189 ----
***************
*** 211,217 ****
              elog(WARNING, "Skipping \"%s\" --- can not process indexes, views or special system tables",
                   RelationGetRelationName(onerel));
          relation_close(onerel, AccessShareLock);
-         CommitTransactionCommand();
          return;
      }

--- 198,203 ----
***************
*** 222,228 ****
          strcmp(RelationGetRelationName(onerel), StatisticRelationName) == 0)
      {
          relation_close(onerel, AccessShareLock);
-         CommitTransactionCommand();
          return;
      }

--- 208,213 ----
***************
*** 283,289 ****
      if (attr_cnt <= 0)
      {
          relation_close(onerel, NoLock);
-         CommitTransactionCommand();
          return;
      }

--- 268,273 ----
***************
*** 370,378 ****
       * entries we made in pg_statistic.)
       */
      relation_close(onerel, NoLock);
-
-     /* Commit and release working memory */
-     CommitTransactionCommand();
  }

  /*
--- 354,359 ----
Index: src/backend/commands/vacuum.c
===================================================================
RCS file: /cvsroot/pgsql/src/backend/commands/vacuum.c,v
retrieving revision 1.226
diff -c -r1.226 vacuum.c
*** src/backend/commands/vacuum.c    24 May 2002 18:57:56 -0000    1.226
--- src/backend/commands/vacuum.c    11 Jun 2002 21:38:59 -0000
***************
*** 110,117 ****


  /* non-export function prototypes */
- static void vacuum_init(VacuumStmt *vacstmt);
- static void vacuum_shutdown(VacuumStmt *vacstmt);
  static List *getrels(const RangeVar *vacrel, const char *stmttype);
  static void vac_update_dbstats(Oid dbid,
                     TransactionId vacuumXID,
--- 110,115 ----
***************
*** 178,190 ****
       * user's transaction too, which would certainly not be the desired
       * behavior.
       */
!     if (IsTransactionBlock())
          elog(ERROR, "%s cannot run inside a BEGIN/END block", stmttype);

      /* Running VACUUM from a function would free the function context */
!     if (!MemoryContextContains(QueryContext, vacstmt))
          elog(ERROR, "%s cannot be executed from a function", stmttype);
!
      /*
       * Send info about dead objects to the statistics collector
       */
--- 176,188 ----
       * user's transaction too, which would certainly not be the desired
       * behavior.
       */
!     if (vacstmt->vacuum && IsTransactionBlock())
          elog(ERROR, "%s cannot run inside a BEGIN/END block", stmttype);

      /* Running VACUUM from a function would free the function context */
!     if (vacstmt->vacuum && !MemoryContextContains(QueryContext, vacstmt))
          elog(ERROR, "%s cannot be executed from a function", stmttype);
!
      /*
       * Send info about dead objects to the statistics collector
       */
***************
*** 207,215 ****
      vrl = getrels(vacstmt->relation, stmttype);

      /*
!      * Start up the vacuum cleaner.
       */
!     vacuum_init(vacstmt);

      /*
       * Process each selected relation.    We are careful to process each
--- 205,255 ----
      vrl = getrels(vacstmt->relation, stmttype);

      /*
!      *        Formerly, there was code here to prevent more than one VACUUM from
!      *        executing concurrently in the same database.  However, there's no
!      *        good reason to prevent that, and manually removing lockfiles after
!      *        a vacuum crash was a pain for dbadmins.  So, forget about lockfiles,
!      *        and just rely on the locks we grab on each target table
!      *        to ensure that there aren't two VACUUMs running on the same table
!      *        at the same time.
!      *
!      *        The strangeness with committing and starting transactions in the
!      *        init and shutdown routines is due to the fact that the vacuum cleaner
!      *        is invoked via an SQL command, and so is already executing inside
!      *        a transaction.    We need to leave ourselves in a predictable state
!      *        on entry and exit to the vacuum cleaner.  We commit the transaction
!      *        started in PostgresMain() inside vacuum_init(), and start one in
!      *        vacuum_shutdown() to match the commit waiting for us back in
!      *        PostgresMain().
       */
!     if (vacstmt->vacuum)
!     {
!         if (vacstmt->relation == NULL)
!         {
!             /*
!              * Compute the initially applicable OldestXmin and FreezeLimit
!              * XIDs, so that we can record these values at the end of the
!              * VACUUM. Note that individual tables may well be processed with
!              * newer values, but we can guarantee that no (non-shared)
!              * relations are processed with older ones.
!              *
!              * It is okay to record non-shared values in pg_database, even though
!              * we may vacuum shared relations with older cutoffs, because only
!              * the minimum of the values present in pg_database matters.  We
!              * can be sure that shared relations have at some time been
!              * vacuumed with cutoffs no worse than the global minimum; for, if
!              * there is a backend in some other DB with xmin = OLDXMIN that's
!              * determining the cutoff with which we vacuum shared relations,
!              * it is not possible for that database to have a cutoff newer
!              * than OLDXMIN recorded in pg_database.
!              */
!             vacuum_set_xid_limits(vacstmt, false,
!                                   &initialOldestXmin, &initialFreezeLimit);
!         }
!
!         /* matches the StartTransaction in PostgresMain() */
!         CommitTransactionCommand();
!     }

      /*
       * Process each selected relation.    We are careful to process each
***************
*** 225,305 ****
          if (vacstmt->vacuum)
              vacuum_rel(relid, vacstmt, RELKIND_RELATION);
          if (vacstmt->analyze)
              analyze_rel(relid, vacstmt);
      }

      /* clean up */
!     vacuum_shutdown(vacstmt);
! }
!
! /*
!  *    vacuum_init(), vacuum_shutdown() -- start up and shut down the vacuum cleaner.
!  *
!  *        Formerly, there was code here to prevent more than one VACUUM from
!  *        executing concurrently in the same database.  However, there's no
!  *        good reason to prevent that, and manually removing lockfiles after
!  *        a vacuum crash was a pain for dbadmins.  So, forget about lockfiles,
!  *        and just rely on the locks we grab on each target table
!  *        to ensure that there aren't two VACUUMs running on the same table
!  *        at the same time.
!  *
!  *        The strangeness with committing and starting transactions in the
!  *        init and shutdown routines is due to the fact that the vacuum cleaner
!  *        is invoked via an SQL command, and so is already executing inside
!  *        a transaction.    We need to leave ourselves in a predictable state
!  *        on entry and exit to the vacuum cleaner.  We commit the transaction
!  *        started in PostgresMain() inside vacuum_init(), and start one in
!  *        vacuum_shutdown() to match the commit waiting for us back in
!  *        PostgresMain().
!  */
! static void
! vacuum_init(VacuumStmt *vacstmt)
! {
!     if (vacstmt->vacuum && vacstmt->relation == NULL)
      {
!         /*
!          * Compute the initially applicable OldestXmin and FreezeLimit
!          * XIDs, so that we can record these values at the end of the
!          * VACUUM. Note that individual tables may well be processed with
!          * newer values, but we can guarantee that no (non-shared)
!          * relations are processed with older ones.
!          *
!          * It is okay to record non-shared values in pg_database, even though
!          * we may vacuum shared relations with older cutoffs, because only
!          * the minimum of the values present in pg_database matters.  We
!          * can be sure that shared relations have at some time been
!          * vacuumed with cutoffs no worse than the global minimum; for, if
!          * there is a backend in some other DB with xmin = OLDXMIN that's
!          * determining the cutoff with which we vacuum shared relations,
!          * it is not possible for that database to have a cutoff newer
!          * than OLDXMIN recorded in pg_database.
!          */
!         vacuum_set_xid_limits(vacstmt, false,
!                               &initialOldestXmin, &initialFreezeLimit);
!     }
!
!     /* matches the StartTransaction in PostgresMain() */
!     CommitTransactionCommand();
! }
!
! static void
! vacuum_shutdown(VacuumStmt *vacstmt)
! {
!     /* on entry, we are not in a transaction */

!     /* matches the CommitTransaction in PostgresMain() */
!     StartTransactionCommand();

!     /*
!      * If we did a database-wide VACUUM, update the database's pg_database
!      * row with info about the transaction IDs used, and try to truncate
!      * pg_clog.
!      */
!     if (vacstmt->vacuum && vacstmt->relation == NULL)
!     {
!         vac_update_dbstats(MyDatabaseId,
!                            initialOldestXmin, initialFreezeLimit);
!         vac_truncate_clog(initialOldestXmin, initialFreezeLimit);
      }

      /*
--- 265,303 ----
          if (vacstmt->vacuum)
              vacuum_rel(relid, vacstmt, RELKIND_RELATION);
          if (vacstmt->analyze)
+         {
+             /* If we vacuumed, use new transaction for analyze. */
+             if (vacstmt->vacuum)
+                 StartTransactionCommand();
              analyze_rel(relid, vacstmt);
+             if (vacstmt->vacuum)
+                 CommitTransactionCommand();
+ /*
+             else
+                 MemoryContextReset();
+ */
+         }
      }

      /* clean up */
!     if (vacstmt->vacuum)
      {
!         /* on entry, we are not in a transaction */

!         /* matches the CommitTransaction in PostgresMain() */
!         StartTransactionCommand();

!         /*
!          * If we did a database-wide VACUUM, update the database's pg_database
!          * row with info about the transaction IDs used, and try to truncate
!          * pg_clog.
!          */
!         if (vacstmt->relation == NULL)
!         {
!             vac_update_dbstats(MyDatabaseId,
!                                initialOldestXmin, initialFreezeLimit);
!             vac_truncate_clog(initialOldestXmin, initialFreezeLimit);
!         }
      }

      /*
