Обсуждение: BUG #17158: Distinct ROW fails with Postgres 14

Поиск
Список
Период
Сортировка

BUG #17158: Distinct ROW fails with Postgres 14

От
PG Bug reporting form
Дата:
The following bug has been logged on the website:

Bug reference:      17158
Logged by:          sait talha nisanci
Email address:      sait.nisanci@microsoft.com
PostgreSQL version: 14beta3
Operating system:   Ubuntu 20.04
Description:

Hi,

```
Create or replace function test_jsonb() returns jsonb as
$$
begin
    return '{"test_json": "test"}';
end;
$$ language plpgsql;

CREATE TABLE local
(
    dist_key bigint PRIMARY KEY,
    col1 int[], col2 int[][], col3 int [][][],
    col4 varchar[], col5 varchar[][], col6 varchar [][][],
    col70 bit, col7 bit[], col8 bit[][], col9 bit [][][],
    col10 bit varying(10),
    col11 bit varying(10)[], col12 bit varying(10)[][], col13 bit
varying(10)[][][],
    col14 bytea, col15 bytea[], col16 bytea[][], col17 bytea[][][],
    col18 boolean, col19 boolean[], col20 boolean[][], col21 boolean[][][],
    col22 inet, col23 inet[], col24 inet[][], col25 inet[][][],
    col26 macaddr, col27 macaddr[], col28 macaddr[][], col29 macaddr[][][],
    col30 numeric, col32 numeric[], col33 numeric[][], col34 numeric[][][],
    col35 jsonb, col36 jsonb[], col37 jsonb[][], col38 jsonb[][][]
);

INSERT INTO local (dist_key,col1, col2, col3, col4, col5, col6, col70, col7,
col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18,
col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29,
col30, col32, col33, col34, col35, col36, col37, col38)
VALUES (1,ARRAY[1], ARRAY[ARRAY[0,0,0]], ARRAY[ARRAY[ARRAY[0,0,0]]],
ARRAY['1'], ARRAY[ARRAY['0','0','0']], ARRAY[ARRAY[ARRAY['0','0','0']]],
'1', ARRAY[b'1'], ARRAY[ARRAY[b'0',b'0',b'0']],
ARRAY[ARRAY[ARRAY[b'0',b'0',b'0']]], '11101',ARRAY[b'1'],
ARRAY[ARRAY[b'01',b'01',b'01']], ARRAY[ARRAY[ARRAY[b'011',b'110',b'0000']]],
'\xb4a8e04c0b', ARRAY['\xb4a8e04c0b'::BYTEA],
ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA, '\xb4a8e04c0b'::BYTEA,
'\xb4a8e04c0b'::BYTEA]],
ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]],
'1', ARRAY[TRUE], ARRAY[ARRAY[1::boolean,TRUE,FALSE]],
ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]]], INET '192.168.1/24', ARRAY[INET
'192.168.1.1'], ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1',
'192.168.1/24']], ARRAY[ARRAY[ARRAY[INET '0.0.0.0', '0.0.0.0/32',
'::ffff:fff0:1', '192.168.1/24']]],MACADDR '08:00:2b:01:02:03',
ARRAY[MACADDR '08:00:2b:01:02:03'], ARRAY[ARRAY[MACADDR '08002b-010203',
MACADDR '08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR
'08002b-010203', MACADDR '08002b-010203', '08002b010203']]], 690,
ARRAY[1.1], ARRAY[ARRAY[0,0.111,0.15]], ARRAY[ARRAY[ARRAY[0,0,0]]],
test_jsonb(), ARRAY[test_jsonb()],
ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]],
ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]]),
       (2,ARRAY[1,2,3], ARRAY[ARRAY[1,2,3], ARRAY[5,6,7]],
ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1,2,3]],
ARRAY[ARRAY[5,6,7]]], ARRAY['1','2','3'], ARRAY[ARRAY['1','2','3'],
ARRAY['5','6','7']], ARRAY[ARRAY[ARRAY['1','2','3']],
ARRAY[ARRAY['5','6','7']], ARRAY[ARRAY['1','2','3']],
ARRAY[ARRAY['5','6','7']]], '0', ARRAY[b'1',b'0',b'0'],
ARRAY[ARRAY[b'1',b'1',b'0'], ARRAY[b'0',b'0',b'1']],
ARRAY[ARRAY[ARRAY[b'1',b'1',b'1']], ARRAY[ARRAY[b'1','0','0']],
ARRAY[ARRAY[b'1','1','1']], ARRAY[ARRAY[b'0','0','0']]], '00010',
ARRAY[b'11',b'10',b'01'], ARRAY[ARRAY[b'11',b'010',b'101'],
ARRAY[b'101',b'01111',b'1000001']],
ARRAY[ARRAY[ARRAY[b'10000',b'111111',b'1101010101']],
ARRAY[ARRAY[b'1101010','0','1']], ARRAY[ARRAY[b'1','1','11111111']],
ARRAY[ARRAY[b'0000000','0','0']]], '\xb4a8e04c0b',
ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA],
ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA],
ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]],
ARRAY[ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]],
ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]],
ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]],
ARRAY[ARRAY['\xb4a8e04c0b'::BYTEA,'\x18a232a678'::BYTEA,'\x38b2697632'::BYTEA]]],
'true', ARRAY[1::boolean,TRUE,FALSE], ARRAY[ARRAY[1::boolean,TRUE,FALSE],
ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[ARRAY[1::boolean,TRUE,FALSE]],
ARRAY[ARRAY[1::boolean,TRUE,FALSE]], ARRAY[ARRAY[1::boolean,TRUE,FALSE]],
ARRAY[ARRAY[1::boolean,TRUE,FALSE]]],'0.0.0.0/32', ARRAY[INET '0.0.0.0',
'0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24'], ARRAY[ARRAY[INET '0.0.0.0',
'0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[ARRAY[INET
'0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET
'0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET
'0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']], ARRAY[ARRAY[INET
'0.0.0.0', '0.0.0.0/32', '::ffff:fff0:1', '192.168.1/24']]],
'0800.2b01.0203', ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203',
'08002b010203'], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR
'08002b-010203', '08002b010203']], ARRAY[ARRAY[ARRAY[MACADDR
'08002b-010203', MACADDR '08002b-010203', '08002b010203']],
ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR '08002b-010203',
'08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203', MACADDR
'08002b-010203', '08002b010203']], ARRAY[ARRAY[MACADDR '08002b-010203',
MACADDR '08002b-010203', '08002b010203']]], 0.99, ARRAY[1.1,2.22,3.33],
ARRAY[ARRAY[1.55,2.66,3.88], ARRAY[11.5,10101.6,7111.1]],
ARRAY[ARRAY[ARRAY[1,2,3]], ARRAY[ARRAY[5,6,7]], ARRAY[ARRAY[1.1,2.1,3]],
ARRAY[ARRAY[5.0,6.0,7.0]]],test_jsonb(),
ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()],
ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()],
ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]],
ARRAY[ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]],
ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]],
ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]],
ARRAY[ARRAY[test_jsonb(),test_jsonb(),test_jsonb(),test_jsonb()]]]);

-- This works fine before pg14.
SELECT DISTINCT ROW(col1, col2, col3, col4, col5, col6, col70, col7, col8,
col9, col10,
col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21,
col22, col23, col24, col25,
col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
AS "row" FROM local WHERE true;
ERROR:  could not identify a hash function for type bit
```

The `SELECT DISTINCT ROW` works fine prior to Postgres 14, so it seems like
there might be some problem with Postgres 14. Is this expected?

Best,
Talha.


Re: BUG #17158: Distinct ROW fails with Postgres 14

От
David Rowley
Дата:
On Tue, 24 Aug 2021 at 21:02, PG Bug reporting form
<noreply@postgresql.org> wrote:
> -- This works fine before pg14.
> SELECT DISTINCT ROW(col1, col2, col3, col4, col5, col6, col70, col7, col8,
> col9, col10,
> col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21,
> col22, col23, col24, col25,
> col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
> AS "row" FROM local WHERE true;
> ERROR:  could not identify a hash function for type bit

It looks like 01e658fa74 is to blame for this.

The test case can be simplified down to just:

create table local (b bit);
insert into local values('1'),('0');
SELECT DISTINCT ROW(b) FROM local;

Tom did have a look at this and raised the question about the
possibility of not being able to hash in [1].

If it's going to be a problem detecting the lack of hashability during
planning then maybe we can just add a hash opclass for BIT to fix this
particular case.

I've copied in Peter as 01e658fa74 is one of his.

David

[1] https://www.postgresql.org/message-id/20201019233234.r6lyxbvdg5s77rvd%40alap3.anarazel.de



Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Tom Lane
Дата:
David Rowley <dgrowleyml@gmail.com> writes:
> It looks like 01e658fa74 is to blame for this.

> The test case can be simplified down to just:

> create table local (b bit);
> insert into local values('1'),('0');
> SELECT DISTINCT ROW(b) FROM local;

> Tom did have a look at this and raised the question about the
> possibility of not being able to hash in [1].

Huh.  According to the thread, we discussed this exact possibility and
there's a test case verifying it ... so apparently something got
fat-fingered there.

> If it's going to be a problem detecting the lack of hashability during
> planning then maybe we can just add a hash opclass for BIT to fix this
> particular case.

Most certainly not.  That would translate to a requirement that EVERY
data type have a hash function.

            regards, tom lane



Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Tom Lane
Дата:
I wrote:
> David Rowley <dgrowleyml@gmail.com> writes:
>> Tom did have a look at this and raised the question about the
>> possibility of not being able to hash in [1].

> Huh.  According to the thread, we discussed this exact possibility and
> there's a test case verifying it ... so apparently something got
> fat-fingered there.

I think this is on me, because I must not have looked closely enough
at Peter's test case to realize that he was proposing to consider it
okay to fail.  We can't have that, for exactly the reason seen here,
which is that people will consider it a regression if their queries
used to work and now hit this error.

The proximate cause of the problem is this over-optimistic bit in
cache_record_field_properties():

    /*
     * For type RECORD, we can't really tell what will work, since we don't
     * have access here to the specific anonymous type.  Just assume that
     * everything will (we may get a failure at runtime ...)
     */
    if (typentry->type_id == RECORDOID)
    {
        typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
                            TCFLAGS_HAVE_FIELD_COMPARE |
                            TCFLAGS_HAVE_FIELD_HASHING |
                            TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
    }

where 01e658fa74 just blindly added hashing to the set of things we
assume an unknown record type can do.  While we seem to have mostly
gotten away with assuming that comparison ops are available, it's
clearly a step too far to assume that hashing is.

The correct long-term fix is to remove this assumption altogether
in favor of adding code to check more carefully in the planner.
But it's probably a bit late in the game to try to fix that for v14.
I propose that we just revert this code to the way it was before
(and improve the comment to explain what's going on).

            regards, tom lane



Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Peter Eisentraut
Дата:
On 24.08.21 11:55, David Rowley wrote:
> If it's going to be a problem detecting the lack of hashability during
> planning then maybe we can just add a hash opclass for BIT to fix this
> particular case.

The following types have btree opclasses but not hash opclasses:

money
bit
bit varying
tsvector
tsquery

Also among contrib:

cube
ltree
seg

We could fix the first three relatively easily (although money is used 
in test cases as not having a hash opclass).  Not sure what to do about 
the rest.



Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Tom Lane
Дата:
Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:
> On 24.08.21 11:55, David Rowley wrote:
>> If it's going to be a problem detecting the lack of hashability during
>> planning then maybe we can just add a hash opclass for BIT to fix this
>> particular case.

> The following types have btree opclasses but not hash opclasses:
> money
> bit
> bit varying
> tsvector
> tsquery
> Also among contrib:
> cube
> ltree
> seg
> We could fix the first three relatively easily (although money is used 
> in test cases as not having a hash opclass).  Not sure what to do about 
> the rest.

We can *not* institute a policy that all types must have hash opclasses,
which is what David's suggestion amounts to.

I've been thinking some more about my upthread suggestion that we just
revert cache_record_field_properties to the way it was, and I think that
it's actually pretty defensible, i.e. the lack of prior complaints isn't
all that astonishing.  If a query plan involves making comparisons
(either equality or more general ordering comparisons) on a given RECORD
column, it's pretty likely that that traces directly to a semantic
requirement of the query.  So the user won't/shouldn't be surprised if
he gets a failure about a component type not being able to perform the
comparison.  The fact that we issue the error at run time not plan
time is a little ugly, but it'd be the same error if we had full
knowledge at plan time.  On the other hand, hashing is an implementation
choice, not a semantic requirement, so users can reasonably expect the
planner to avoid using hashing when it won't work.

This argument falls down in a situation where duplicate-elimination
could be done with either hashing or sorting and the datatype has
hashing but not ordering support.  I'd argue, however, that the set of
such datatypes is darn near empty.  In any case, such failures are not
regressions because they never worked before either.

Undoing that would lose v14's ability to select hashed duplicate
elimination for RECORD columns, but that's still not a regression
because we didn't have it before.  Moreover, anyone who's unhappy can
work around the problem by explicitly casting the column to some
suitable named composite type.  We can leave it for later to make the
planner smarter about anonymous record types.  It clearly could be
smarter, at least for the case of an explicit ROW construct at top
level; but now is no time to be writing such code for v14.

            regards, tom lane



Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Peter Eisentraut
Дата:
On 25.08.21 00:16, Tom Lane wrote:
> Undoing that would lose v14's ability to select hashed duplicate
> elimination for RECORD columns, but that's still not a regression
> because we didn't have it before.  Moreover, anyone who's unhappy can
> work around the problem by explicitly casting the column to some
> suitable named composite type.  We can leave it for later to make the
> planner smarter about anonymous record types.  It clearly could be
> smarter, at least for the case of an explicit ROW construct at top
> level; but now is no time to be writing such code for v14.

This feature is a requirement for multicolumn path and cycle tracking in 
recursive queries, as well as the search/cycle syntax built on top of 
that, so there is a bit more depending on it than might be at first 
apparent.

I've been looking at ways to repair this with minimal impact. 
Essentially, we'd need a way to ask the type cache to distinguish between 
"do you have hash support if it's guaranteed to work" versus "hash 
support is my only hope, so give it to me even if you're not completely 
sure it will work".  Putting this directly into the type cache does not 
seem feasible with the current structure.  But there aren't that many 
callers of TYPECACHE_HASH_PROC*, so I looked at handling it there.

Variant 1 is that we let the type cache *not* report hash support for 
the record type, and let callers fill it in.  In the attached patch I've 
only done this for hash_array(), because that's what's needed to get the 
tests to pass, but similar code would be possible for row types, range 
types, etc.

Variant 2 is that we let the type cache report hash support for the 
record type, like now, and then let callers override it if they have 
other options.  This is the second attached patch.

It's basically fifty-fifty in terms of how many places you need to touch 
in either case.

With both patches, you'll see the "union" regression test fail, which 
includes a test case that is equivalent to the one from this bug report 
(but using money instead of bit), but the "with" test still passes, 
which covers the feature I mentioned at the beginning.

Thoughts?

Вложения

Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Tom Lane
Дата:
Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:
> This feature is a requirement for multicolumn path and cycle tracking in 
> recursive queries, as well as the search/cycle syntax built on top of 
> that, so there is a bit more depending on it than might be at first 
> apparent.

Hmm.

> Variant 1 is that we let the type cache *not* report hash support for 
> the record type, and let callers fill it in.  In the attached patch I've 
> only done this for hash_array(), because that's what's needed to get the 
> tests to pass, but similar code would be possible for row types, range 
> types, etc.

> Variant 2 is that we let the type cache report hash support for the 
> record type, like now, and then let callers override it if they have 
> other options.  This is the second attached patch.

I find variant 1 a bit cleaner, and safer.  I'd rather default to
assuming that RECORD doesn't hash, when we don't have enough info
to be sure.

            regards, tom lane



Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Peter Eisentraut
Дата:
On 31.08.21 22:43, Tom Lane wrote:
> I find variant 1 a bit cleaner, and safer.  I'd rather default to
> assuming that RECORD doesn't hash, when we don't have enough info
> to be sure.

Ok, here is a more polished patch for that.

Вложения

Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Peter Eisentraut
Дата:
On 02.09.21 10:15, Peter Eisentraut wrote:
> On 31.08.21 22:43, Tom Lane wrote:
>> I find variant 1 a bit cleaner, and safer.  I'd rather default to
>> assuming that RECORD doesn't hash, when we don't have enough info
>> to be sure.
> 
> Ok, here is a more polished patch for that.

committed



Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Tom Lane
Дата:
Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:
> On 02.09.21 10:15, Peter Eisentraut wrote:
>> On 31.08.21 22:43, Tom Lane wrote:
>>> I find variant 1 a bit cleaner, and safer.  I'd rather default to
>>> assuming that RECORD doesn't hash, when we don't have enough info
>>> to be sure.

>> Ok, here is a more polished patch for that.

> committed

I apologize for not having found the time to review this before
it went in ... but what you did in hash_array is pretty awful:

        /*
         * The type cache doesn't believe that record is hashable (see
         * cache_record_field_properties()), but since we're here, we're
         * committed to hashing, so we can assume it does.  Worst case, if any
         * components of the record don't support hashing, we will fail at
         * execution.
         */
        if (element_type == RECORDOID)
        {
            MemoryContext oldcontext;
            TypeCacheEntry *record_typentry;

            oldcontext = MemoryContextSwitchTo(CacheMemoryContext);

            /*
             * Make fake type cache entry structure.  Note that we can't just
             * modify typentry, since that points directly into the type cache.
             */
            record_typentry = palloc(sizeof(*record_typentry));

            /* fill in what we need below */
            record_typentry->typlen = typentry->typlen;
            record_typentry->typbyval = typentry->typbyval;
            record_typentry->typalign = typentry->typalign;
            fmgr_info(F_HASH_RECORD, &record_typentry->hash_proc_finfo);

            MemoryContextSwitchTo(oldcontext);

            typentry = record_typentry;
        }

        fcinfo->flinfo->fn_extra = (void *) typentry;
    }

The reason skink has been falling over since this went in is that
this kluge didn't bother to fill record_typentry->type_id, which
results in the next call seeing an undefined value in

    if (typentry == NULL ||
        typentry->type_id != element_type)

which most likely will cause it to allocate another dummy typcache
entry; lather, rinse, repeat for each call.  But even with that
fixed, I do not think this is even a little bit acceptable, because
it will permanently leak a TypeCacheEntry plus subsidiary FmgrInfo data
for each query that uses hash_array.

Perhaps it'd work to put the phony entry into fcinfo->flinfo->fn_mcxt
instead of CacheMemoryContext.

BTW, skink's failure can be reproduced pretty quickly by running the
attached under valgrind.

            regards, tom lane


create temp table graph0( f int, t int, label text );

insert into graph0 values
    (1, 2, 'arc 1 -> 2'),
    (1, 3, 'arc 1 -> 3'),
    (2, 3, 'arc 2 -> 3'),
    (1, 4, 'arc 1 -> 4'),
    (4, 5, 'arc 4 -> 5');

with recursive search_graph(f, t, label) as (
    select * from graph0 g
    union distinct
    select g.*
    from graph0 g, search_graph sg
    where g.f = sg.t
) search depth first by f, t set seq
select * from search_graph order by seq;

Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Peter Eisentraut
Дата:
On 10.09.21 21:27, Tom Lane wrote:
> Perhaps it'd work to put the phony entry into fcinfo->flinfo->fn_mcxt
> instead of CacheMemoryContext.
> 
> BTW, skink's failure can be reproduced pretty quickly by running the
> attached under valgrind.

Ok, the attached patch fixes the valgrind error.

Вложения

Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Tom Lane
Дата:
Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:
> On 10.09.21 21:27, Tom Lane wrote:
>> Perhaps it'd work to put the phony entry into fcinfo->flinfo->fn_mcxt
>> instead of CacheMemoryContext.

> Ok, the attached patch fixes the valgrind error.

Looks roughly sane to me.  I'm of two minds about whether you
ought to change the palloc to palloc0.  We'd have taken much
longer to notice this problem if palloc0 had been used; but
from any standpoint other than "will valgrind catch it", it
seems like zeroing the fake typcache entry would be safer.

            regards, tom lane



Re: BUG #17158: Distinct ROW fails with Postgres 14

От
Peter Eisentraut
Дата:
On 14.09.21 17:02, Tom Lane wrote:
> Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:
>> On 10.09.21 21:27, Tom Lane wrote:
>>> Perhaps it'd work to put the phony entry into fcinfo->flinfo->fn_mcxt
>>> instead of CacheMemoryContext.
> 
>> Ok, the attached patch fixes the valgrind error.
> 
> Looks roughly sane to me.  I'm of two minds about whether you
> ought to change the palloc to palloc0.  We'd have taken much
> longer to notice this problem if palloc0 had been used; but
> from any standpoint other than "will valgrind catch it", it
> seems like zeroing the fake typcache entry would be safer.

Yeah, pushed with the palloc0.