LCOV - differential code coverage report
Current view:  top level - src/backend/utils/adt - array_typanalyze.c (source / functions)
Current:       Differential Code Coverage HEAD vs 15
Current Date:  2023-04-08 15:15:32
Baseline:      15
Baseline Date: 2023-04-08 15:09:40

               Coverage   Total   Hit   UNC   UBC   GNC   CBC   DUB   DCB
Lines:           92.2 %     218   201     1    16     3   198     1     3
Functions:       88.9 %       9     8           1     1     7

           TLA  Line data    Source code
       1                 : /*-------------------------------------------------------------------------
       2                 :  *
       3                 :  * array_typanalyze.c
       4                 :  *    Functions for gathering statistics from array columns
       5                 :  *
       6                 :  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
       7                 :  * Portions Copyright (c) 1994, Regents of the University of California
       8                 :  *
       9                 :  *
      10                 :  * IDENTIFICATION
      11                 :  *    src/backend/utils/adt/array_typanalyze.c
      12                 :  *
      13                 :  *-------------------------------------------------------------------------
      14                 :  */
      15                 : #include "postgres.h"
      16                 : 
      17                 : #include "access/detoast.h"
      18                 : #include "commands/vacuum.h"
      19                 : #include "utils/array.h"
      20                 : #include "utils/builtins.h"
      21                 : #include "utils/datum.h"
      22                 : #include "utils/lsyscache.h"
      23                 : #include "utils/typcache.h"
      24                 : 
      25                 : 
      26                 : /*
      27                 :  * To avoid consuming too much memory, IO and CPU load during analysis, and/or
      28                 :  * too much space in the resulting pg_statistic rows, we ignore arrays that
      29                 :  * are wider than ARRAY_WIDTH_THRESHOLD (after detoasting!).  Note that this
      30                 :  * number is considerably more than the similar WIDTH_THRESHOLD limit used
      31                 :  * in analyze.c's standard typanalyze code.
      32                 :  */
      33                 : #define ARRAY_WIDTH_THRESHOLD 0x10000
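/* For scale: 0x10000 bytes is 64 kB, versus the 1024-byte WIDTH_THRESHOLD in analyze.c. */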
      34                 : 
      35                 : /* Extra data for compute_array_stats function */
      36                 : typedef struct
      37                 : {
      38                 :     /* Information about array element type */
      39                 :     Oid         type_id;        /* element type's OID */
      40                 :     Oid         eq_opr;         /* default equality operator's OID */
      41                 :     Oid         coll_id;        /* collation to use */
      42                 :     bool        typbyval;       /* physical properties of element type */
      43                 :     int16       typlen;
      44                 :     char        typalign;
      45                 : 
      46                 :     /*
      47                 :      * Lookup data for element type's comparison and hash functions (these are
      48                 :      * in the type's typcache entry, which we expect to remain valid over the
      49                 :      * lifespan of the ANALYZE run)
      50                 :      */
      51                 :     FmgrInfo   *cmp;
      52                 :     FmgrInfo   *hash;
      53                 : 
      54                 :     /* Saved state from std_typanalyze() */
      55                 :     AnalyzeAttrComputeStatsFunc std_compute_stats;
      56                 :     void       *std_extra_data;
      57                 : } ArrayAnalyzeExtraData;
      58                 : 
      59                 : /*
      60                 :  * While compute_array_stats is running, we keep a pointer to the extra data
      61                 :  * here for use by assorted subroutines.  compute_array_stats doesn't
      62                 :  * currently need to be re-entrant, so avoiding this is not worth the extra
      63                 :  * notational cruft that would be needed.
      64                 :  */
      65                 : static ArrayAnalyzeExtraData *array_extra_data;
      66                 : 
      67                 : /* A hash table entry for the Lossy Counting algorithm */
      68                 : typedef struct
      69                 : {
      70                 :     Datum       key;            /* This is 'e' from the LC algorithm. */
      71                 :     int         frequency;      /* This is 'f'. */
      72                 :     int         delta;          /* And this is 'delta'. */
      73                 :     int         last_container; /* For de-duplication of array elements. */
      74                 : } TrackItem;
      75                 : 
      76                 : /* A hash table entry for distinct-elements counts */
      77                 : typedef struct
      78                 : {
      79                 :     int         count;          /* Count of distinct elements in an array */
      80                 :     int         frequency;      /* Number of arrays seen with this count */
      81                 : } DECountItem;
      82                 : 
      83                 : static void compute_array_stats(VacAttrStats *stats,
      84                 :                                 AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows);
      85                 : static void prune_element_hashtable(HTAB *elements_tab, int b_current);
      86                 : static uint32 element_hash(const void *key, Size keysize);
      87                 : static int  element_match(const void *key1, const void *key2, Size keysize);
      88                 : static int  element_compare(const void *key1, const void *key2);
      89                 : static int  trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg);
      90                 : static int  trackitem_compare_element(const void *e1, const void *e2, void *arg);
      91                 : static int  countitem_compare_count(const void *e1, const void *e2, void *arg);
      92                 : 
      93                 : 
      94                 : /*
      95                 :  * array_typanalyze -- typanalyze function for array columns
      96                 :  */
      97                 : Datum
      98 CBC       14084 : array_typanalyze(PG_FUNCTION_ARGS)
      99                 : {
     100           14084 :     VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);
     101                 :     Oid         element_typeid;
     102                 :     TypeCacheEntry *typentry;
     103                 :     ArrayAnalyzeExtraData *extra_data;
     104                 : 
     105                 :     /*
     106                 :      * Call the standard typanalyze function.  It may fail to find needed
     107                 :      * operators, in which case we also can't do anything, so just fail.
     108                 :      */
     109           14084 :     if (!std_typanalyze(stats))
     110 UBC           0 :         PG_RETURN_BOOL(false);
     111                 : 
     112                 :     /*
     113                 :      * Check attribute data type is a varlena array (or a domain over one).
     114                 :      */
     115 CBC       14084 :     element_typeid = get_base_element_type(stats->attrtypid);
     116           14084 :     if (!OidIsValid(element_typeid))
     117 UBC           0 :         elog(ERROR, "array_typanalyze was invoked for non-array type %u",
     118                 :              stats->attrtypid);
     119                 : 
     120                 :     /*
     121                 :      * Gather information about the element type.  If we fail to find
     122                 :      * something, return leaving the state from std_typanalyze() in place.
     123                 :      */
     124 CBC       14084 :     typentry = lookup_type_cache(element_typeid,
     125                 :                                  TYPECACHE_EQ_OPR |
     126                 :                                  TYPECACHE_CMP_PROC_FINFO |
     127                 :                                  TYPECACHE_HASH_PROC_FINFO);
     128                 : 
     129           14084 :     if (!OidIsValid(typentry->eq_opr) ||
     130           13760 :         !OidIsValid(typentry->cmp_proc_finfo.fn_oid) ||
     131            9208 :         !OidIsValid(typentry->hash_proc_finfo.fn_oid))
     132            4876 :         PG_RETURN_BOOL(true);
     133                 : 
     134                 :     /* Store our findings for use by compute_array_stats() */
     135            9208 :     extra_data = (ArrayAnalyzeExtraData *) palloc(sizeof(ArrayAnalyzeExtraData));
     136            9208 :     extra_data->type_id = typentry->type_id;
     137            9208 :     extra_data->eq_opr = typentry->eq_opr;
     138            9208 :     extra_data->coll_id = stats->attrcollid;  /* collation we should use */
     139            9208 :     extra_data->typbyval = typentry->typbyval;
     140            9208 :     extra_data->typlen = typentry->typlen;
     141            9208 :     extra_data->typalign = typentry->typalign;
     142            9208 :     extra_data->cmp = &typentry->cmp_proc_finfo;
     143            9208 :     extra_data->hash = &typentry->hash_proc_finfo;
     144                 : 
     145                 :     /* Save old compute_stats and extra_data for scalar statistics ... */
     146            9208 :     extra_data->std_compute_stats = stats->compute_stats;
     147            9208 :     extra_data->std_extra_data = stats->extra_data;
     148                 : 
     149                 :     /* ... and replace with our info */
     150            9208 :     stats->compute_stats = compute_array_stats;
     151            9208 :     stats->extra_data = extra_data;
     152                 : 
     153                 :     /*
     154                 :      * Note we leave stats->minrows set as std_typanalyze set it.  Should it
     155                 :      * be increased for array analysis purposes?
     156                 :      */
     157                 : 
     158            9208 :     PG_RETURN_BOOL(true);
     159                 : }
     160                 : 
     161                 : /*
     162                 :  * compute_array_stats() -- compute statistics for an array column
     163                 :  *
     164                 :  * This function computes statistics useful for determining selectivity of
     165                 :  * the array operators <@, &&, and @>.  It is invoked by ANALYZE via the
     166                 :  * compute_stats hook after sample rows have been collected.
     167                 :  *
     168                 :  * We also invoke the standard compute_stats function, which will compute
     169                 :  * "scalar" statistics relevant to the btree-style array comparison operators.
     170                 :  * However, exact duplicates of an entire array may be rare despite many
     171                 :  * arrays sharing individual elements.  This especially afflicts long arrays,
     172                 :  * which are also liable to lack all scalar statistics due to the low
     173                 :  * WIDTH_THRESHOLD used in analyze.c.  So, in addition to the standard stats,
     174                 :  * we find the most common array elements and compute a histogram of distinct
     175                 :  * element counts.
     176                 :  *
     177                 :  * The algorithm used is Lossy Counting, as proposed in the paper "Approximate
     178                 :  * frequency counts over data streams" by G. S. Manku and R. Motwani, in
     179                 :  * Proceedings of the 28th International Conference on Very Large Data Bases,
     180                 :  * Hong Kong, China, August 2002, section 4.2. The paper is available at
     181                 :  * http://www.vldb.org/conf/2002/S10P03.pdf
     182                 :  *
     183                 :  * The Lossy Counting (aka LC) algorithm goes like this:
     184                 :  * Let s be the threshold frequency for an item (the minimum frequency we
     185                 :  * are interested in) and epsilon the error margin for the frequency. Let D
     186                 :  * be a set of triples (e, f, delta), where e is an element value, f is that
     187                 :  * element's frequency (actually, its current occurrence count) and delta is
     188                 :  * the maximum error in f. We start with D empty and process the elements in
     189                 :  * batches of size w. (The batch size is also known as "bucket size" and is
     190                 :  * equal to 1/epsilon.) Let the current batch number be b_current, starting
     191                 :  * with 1. For each element e we either increment its f count, if it's
     192                 :  * already in D, or insert a new triple into D with values (e, 1, b_current
     193                 :  * - 1). After processing each batch we prune D, by removing from it all
     194                 :  * elements with f + delta <= b_current.  After the algorithm finishes we
     195                 :  * suppress all elements from D that do not satisfy f >= (s - epsilon) * N,
     196                 :  * where N is the total number of elements in the input.  We emit the
     197                 :  * remaining elements with estimated frequency f/N.  The LC paper proves
     198                 :  * that this algorithm finds all elements with true frequency at least s,
     199                 :  * and that no frequency is overestimated or is underestimated by more than
     200                 :  * epsilon.  Furthermore, given reasonable assumptions about the input
     201                 :  * distribution, the required table size is no more than about 7 times w.
     202                 :  *
     203                 :  * In the absence of a principled basis for other particular values, we
     204                 :  * follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
     205                 :  * But we leave out the correction for stopwords, which do not apply to
     206                 :  * arrays.  These parameters give bucket width w = K/0.007 and maximum
     207                 :  * expected hashtable size of about 1000 * K.
     208                 :  *
     209                 :  * Elements may repeat within an array.  Since duplicates do not change the
     210                 :  * behavior of <@, && or @>, we want to count each element only once per
     211                 :  * array.  Therefore, we store in the finished pg_statistic entry each
     212                 :  * element's frequency as the fraction of all non-null rows that contain it.
     213                 :  * We divide the raw counts by nonnull_cnt to get those figures.
     214                 :  */
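As a reading aid, here is a minimal standalone sketch of the bare Lossy Counting loop described above; in the terms used by the function below, K corresponds to num_mcelem and w to bucket_width. The LCEntry table, the lc_lookup_or_insert helper, the toy input stream, and the constants are invented for the illustration and are not part of array_typanalyze.c.

/* lc_sketch.c -- standalone illustration of the Lossy Counting loop */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    int         key;            /* 'e' from the LC algorithm */
    long        frequency;      /* 'f' */
    int         delta;          /* maximum error in 'f' */
    int         in_use;
} LCEntry;

#define MAX_ENTRIES 1024        /* about 7 * w is plenty for this sketch */

static LCEntry lc_table[MAX_ENTRIES];

/* Find the entry for 'key', inserting (e, 0, b_current - 1) if absent. */
static LCEntry *
lc_lookup_or_insert(int key, int b_current)
{
    LCEntry    *free_slot = NULL;

    for (int i = 0; i < MAX_ENTRIES; i++)
    {
        if (lc_table[i].in_use && lc_table[i].key == key)
            return &lc_table[i];
        if (!lc_table[i].in_use && free_slot == NULL)
            free_slot = &lc_table[i];
    }
    if (free_slot == NULL)
    {
        fprintf(stderr, "sketch table full\n");
        exit(1);
    }
    free_slot->key = key;
    free_slot->frequency = 0;   /* caller increments f */
    free_slot->delta = b_current - 1;
    free_slot->in_use = 1;
    return free_slot;
}

int
main(void)
{
    const double s = 0.07;              /* threshold frequency */
    const double epsilon = s / 10.0;    /* allowed error in frequency */
    const int   w = (int) (1.0 / epsilon);      /* bucket width, 142 here */
    int         b_current = 1;          /* current bucket number */
    long        N = 0;                  /* elements seen so far */

    /* Skewed toy stream: values 0..4 each occur with frequency ~0.1. */
    for (int i = 0; i < 100000; i++)
    {
        int         e = (rand() % 2) ? rand() % 5 : 5 + rand() % 1000;
        LCEntry    *entry = lc_lookup_or_insert(e, b_current);

        entry->frequency++;
        N++;

        /* After each batch of w elements, prune entries with f + delta <= b_current. */
        if (N % w == 0)
        {
            for (int j = 0; j < MAX_ENTRIES; j++)
            {
                if (lc_table[j].in_use &&
                    lc_table[j].frequency + lc_table[j].delta <= b_current)
                    lc_table[j].in_use = 0;
            }
            b_current++;
        }
    }

    /* Emit elements with f >= (s - epsilon) * N, estimated frequency f/N. */
    for (int i = 0; i < MAX_ENTRIES; i++)
    {
        if (lc_table[i].in_use && lc_table[i].frequency >= (s - epsilon) * N)
            printf("element %d: estimated frequency %.3f\n",
                   lc_table[i].key, lc_table[i].frequency / (double) N);
    }
    return 0;
}

Run as-is, the sketch should print the five frequent values with estimated frequencies near 0.1, while the rare values are pruned away bucket by bucket.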
     215                 : static void
     216            5965 : compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
     217                 :                     int samplerows, double totalrows)
     218                 : {
     219                 :     ArrayAnalyzeExtraData *extra_data;
     220                 :     int         num_mcelem;
     221            5965 :     int         null_elem_cnt = 0;
     222            5965 :     int         analyzed_rows = 0;
     223                 : 
     224                 :     /* This is D from the LC algorithm. */
     225                 :     HTAB       *elements_tab;
     226                 :     HASHCTL     elem_hash_ctl;
     227                 :     HASH_SEQ_STATUS scan_status;
     228                 : 
     229                 :     /* This is the current bucket number from the LC algorithm */
     230                 :     int         b_current;
     231                 : 
     232                 :     /* This is 'w' from the LC algorithm */
     233                 :     int         bucket_width;
     234                 :     int         array_no;
     235                 :     int64       element_no;
     236                 :     TrackItem  *item;
     237                 :     int         slot_idx;
     238                 :     HTAB       *count_tab;
     239                 :     HASHCTL     count_hash_ctl;
     240                 :     DECountItem *count_item;
     241                 : 
     242            5965 :     extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;
     243                 : 
     244                 :     /*
     245                 :      * Invoke analyze.c's standard analysis function to create scalar-style
     246                 :      * stats for the column.  It will expect its own extra_data pointer, so
     247                 :      * temporarily install that.
     248                 :      */
     249            5965 :     stats->extra_data = extra_data->std_extra_data;
     250            5965 :     extra_data->std_compute_stats(stats, fetchfunc, samplerows, totalrows);
     251            5965 :     stats->extra_data = extra_data;
     252                 : 
     253                 :     /*
     254                 :      * Set up static pointer for use by subroutines.  We wait till here in
     255                 :      * case std_compute_stats somehow recursively invokes us (probably not
     256                 :      * possible, but ...)
     257                 :      */
     258            5965 :     array_extra_data = extra_data;
     259                 : 
     260                 :     /*
     261                 :      * We want statistics_target * 10 elements in the MCELEM array. This
     262                 :      * multiplier is pretty arbitrary, but is meant to reflect the fact that
     263                 :      * the number of individual elements tracked in pg_statistic ought to be
     264                 :      * more than the number of values for a simple scalar column.
     265                 :      */
     266            5965 :     num_mcelem = stats->attr->attstattarget * 10;
     267                 : 
     268                 :     /*
     269                 :      * We set bucket width equal to num_mcelem / 0.007 as per the comment
     270                 :      * above.
     271                 :      */
     272            5965 :     bucket_width = num_mcelem * 1000 / 7;
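    /*
     * Illustrative numbers: with the default statistics target of 100,
     * num_mcelem = 1000 and bucket_width = 1000 * 1000 / 7 = 142857, so by
     * the header comment's bound of about 7 * w the hashtable should need
     * no more than roughly one million entries.
     */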
     273                 : 
     274                 :     /*
     275                 :      * Create the hashtable. It will be in local memory, so we don't need to
     276                 :      * worry about overflowing the initial size. Also we don't need to pay any
     277                 :      * attention to locking and memory management.
     278                 :      */
     279            5965 :     elem_hash_ctl.keysize = sizeof(Datum);
     280            5965 :     elem_hash_ctl.entrysize = sizeof(TrackItem);
     281            5965 :     elem_hash_ctl.hash = element_hash;
     282            5965 :     elem_hash_ctl.match = element_match;
     283            5965 :     elem_hash_ctl.hcxt = CurrentMemoryContext;
     284            5965 :     elements_tab = hash_create("Analyzed elements table",
     285                 :                                num_mcelem,
     286                 :                                &elem_hash_ctl,
     287                 :                                HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
     288                 : 
     289                 :     /* hashtable for array distinct elements counts */
     290            5965 :     count_hash_ctl.keysize = sizeof(int);
     291            5965 :     count_hash_ctl.entrysize = sizeof(DECountItem);
     292            5965 :     count_hash_ctl.hcxt = CurrentMemoryContext;
     293            5965 :     count_tab = hash_create("Array distinct element count table",
     294                 :                             64,
     295                 :                             &count_hash_ctl,
     296                 :                             HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
     297                 : 
     298                 :     /* Initialize counters. */
     299            5965 :     b_current = 1;
     300            5965 :     element_no = 0;
     301                 : 
     302                 :     /* Loop over the arrays. */
     303         8107412 :     for (array_no = 0; array_no < samplerows; array_no++)
     304                 :     {
     305                 :         Datum       value;
     306                 :         bool        isnull;
     307                 :         ArrayType  *array;
     308                 :         int         num_elems;
     309                 :         Datum      *elem_values;
     310                 :         bool       *elem_nulls;
     311                 :         bool        null_present;
     312                 :         int         j;
     313         8101447 :         int64       prev_element_no = element_no;
     314                 :         int         distinct_count;
     315                 :         bool        count_item_found;
     316                 : 
     317         8101447 :         vacuum_delay_point();
     318                 : 
     319         8101447 :         value = fetchfunc(stats, array_no, &isnull);
     320         8101447 :         if (isnull)
     321                 :         {
     322                 :             /* ignore arrays that are null overall */
     323         7711706 :             continue;
     324                 :         }
     325                 : 
     326                 :         /* Skip too-large values. */
     327          389741 :         if (toast_raw_datum_size(value) > ARRAY_WIDTH_THRESHOLD)
     328 UBC           0 :             continue;
     329                 :         else
     330 CBC      389741 :             analyzed_rows++;
     331                 : 
     332                 :         /*
     333                 :          * Now detoast the array if needed, and deconstruct into datums.
     334                 :          */
     335          389741 :         array = DatumGetArrayTypeP(value);
     336                 : 
     337          389741 :         Assert(ARR_ELEMTYPE(array) == extra_data->type_id);
     338          389741 :         deconstruct_array(array,
     339                 :                           extra_data->type_id,
     340          389741 :                           extra_data->typlen,
     341          389741 :                           extra_data->typbyval,
     342          389741 :                           extra_data->typalign,
     343                 :                           &elem_values, &elem_nulls, &num_elems);
     344                 : 
     345                 :         /*
     346                 :          * We loop through the elements in the array and add them to our
     347                 :          * tracking hashtable.
     348                 :          */
     349          389741 :         null_present = false;
     350         1722526 :         for (j = 0; j < num_elems; j++)
     351                 :         {
     352                 :             Datum       elem_value;
     353                 :             bool        found;
     354                 : 
     355                 :             /* No null element processing other than flag setting here */
     356         1332785 :             if (elem_nulls[j])
     357                 :             {
     358              20 :                 null_present = true;
     359          250450 :                 continue;
     360                 :             }
     361                 : 
     362                 :             /* Lookup current element in hashtable, adding it if new */
     363         1332765 :             elem_value = elem_values[j];
     364         1332765 :             item = (TrackItem *) hash_search(elements_tab,
     365                 :                                              &elem_value,
     366                 :                                              HASH_ENTER, &found);
     367                 : 
     368         1332765 :             if (found)
     369                 :             {
     370                 :                 /* The element value is already on the tracking list */
     371                 : 
     372                 :                 /*
     373                 :                  * The operators we assist ignore duplicate array elements, so
     374                 :                  * count a given distinct element only once per array.
     375                 :                  */
     376         1048677 :                 if (item->last_container == array_no)
     377          250430 :                     continue;
     378                 : 
     379          798247 :                 item->frequency++;
     380          798247 :                 item->last_container = array_no;
     381                 :             }
     382                 :             else
     383                 :             {
     384                 :                 /* Initialize new tracking list element */
     385                 : 
     386                 :                 /*
     387                 :                  * If element type is pass-by-reference, we must copy it into
     388                 :                  * palloc'd space, so that we can release the array below. (We
     389                 :                  * do this so that the space needed for element values is
     390                 :                  * limited by the size of the hashtable; if we kept all the
     391                 :                  * array values around, it could be much more.)
     392                 :                  */
     393          568176 :                 item->key = datumCopy(elem_value,
     394          284088 :                                       extra_data->typbyval,
     395          284088 :                                       extra_data->typlen);
     396                 : 
     397          284088 :                 item->frequency = 1;
     398          284088 :                 item->delta = b_current - 1;
     399          284088 :                 item->last_container = array_no;
     400                 :             }
     401                 : 
     402                 :             /* element_no is the number of elements processed (ie N) */
     403         1082335 :             element_no++;
     404                 : 
     405                 :             /* We prune the D structure after processing each bucket */
     406         1082335 :             if (element_no % bucket_width == 0)
     407                 :             {
     408 UBC           0 :                 prune_element_hashtable(elements_tab, b_current);
     409               0 :                 b_current++;
     410                 :             }
     411                 :         }
     412                 : 
     413                 :         /* Count null element presence once per array. */
     414 CBC      389741 :         if (null_present)
     415              20 :             null_elem_cnt++;
     416                 : 
     417                 :         /* Update frequency of the particular array distinct element count. */
     418          389741 :         distinct_count = (int) (element_no - prev_element_no);
     419          389741 :         count_item = (DECountItem *) hash_search(count_tab, &distinct_count,
     420                 :                                                  HASH_ENTER,
     421                 :                                                  &count_item_found);
     422                 : 
     423          389741 :         if (count_item_found)
     424          378076 :             count_item->frequency++;
     425                 :         else
     426           11665 :             count_item->frequency = 1;
     427                 : 
     428                 :         /* Free memory allocated while detoasting. */
     429          389741 :         if (PointerGetDatum(array) != value)
     430          366017 :             pfree(array);
     431          389741 :         pfree(elem_values);
     432          389741 :         pfree(elem_nulls);
     433                 :     }
     434                 : 
     435                 :     /* Skip pg_statistic slots occupied by standard statistics */
     436            5965 :     slot_idx = 0;
     437           10657 :     while (slot_idx < STATISTIC_NUM_SLOTS && stats->stakind[slot_idx] != 0)
     438            4692 :         slot_idx++;
     439            5965 :     if (slot_idx > STATISTIC_NUM_SLOTS - 2)
     440 UBC           0 :         elog(ERROR, "insufficient pg_statistic slots for array stats");
     441                 : 
     442                 :     /* We can only compute real stats if we found some non-null values. */
     443 CBC        5965 :     if (analyzed_rows > 0)
     444                 :     {
     445            1703 :         int         nonnull_cnt = analyzed_rows;
     446                 :         int         count_items_count;
     447                 :         int         i;
     448                 :         TrackItem **sort_table;
     449                 :         int         track_len;
     450                 :         int64       cutoff_freq;
     451                 :         int64       minfreq,
     452                 :                     maxfreq;
     453                 : 
     454                 :         /*
     455                 :          * We assume the standard stats code already took care of setting
     456                 :          * stats_valid, stanullfrac, stawidth, stadistinct.  We'd have to
     457                 :          * re-compute those values if we wanted to not store the standard
     458                 :          * stats.
     459                 :          */
     460                 : 
     461                 :         /*
     462                 :          * Construct an array of the interesting hashtable items, that is,
     463                 :          * those meeting the cutoff frequency (s - epsilon)*N.  Also identify
     464                 :          * the minimum and maximum frequencies among these items.
     465                 :          *
     466                 :          * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
      467                 :          * frequency is (s - epsilon)*N = 9*epsilon*N = 9*N / bucket_width.
     468                 :          */
     469            1703 :         cutoff_freq = 9 * element_no / bucket_width;
     470                 : 
     471            1703 :         i = hash_get_num_entries(elements_tab); /* surely enough space */
     472            1703 :         sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);
     473                 : 
     474            1703 :         hash_seq_init(&scan_status, elements_tab);
     475            1703 :         track_len = 0;
     476            1703 :         minfreq = element_no;
     477            1703 :         maxfreq = 0;
     478          287494 :         while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
     479                 :         {
     480          284088 :             if (item->frequency > cutoff_freq)
     481                 :             {
     482          176079 :                 sort_table[track_len++] = item;
     483          176079 :                 minfreq = Min(minfreq, item->frequency);
     484          176079 :                 maxfreq = Max(maxfreq, item->frequency);
     485                 :             }
     486                 :         }
     487            1703 :         Assert(track_len <= i);
     488                 : 
     489                 :         /* emit some statistics for debug purposes */
     490            1703 :         elog(DEBUG3, "compute_array_stats: target # mces = %d, "
     491                 :              "bucket width = %d, "
     492                 :              "# elements = " INT64_FORMAT ", hashtable size = %d, "
     493                 :              "usable entries = %d",
     494                 :              num_mcelem, bucket_width, element_no, i, track_len);
     495                 : 
     496                 :         /*
     497                 :          * If we obtained more elements than we really want, get rid of those
     498                 :          * with least frequencies.  The easiest way is to qsort the array into
     499                 :          * descending frequency order and truncate the array.
     500                 :          */
     501            1703 :         if (num_mcelem < track_len)
     502                 :         {
     503              15 :             qsort_interruptible(sort_table, track_len, sizeof(TrackItem *),
     504                 :                                 trackitem_compare_frequencies_desc, NULL);
     505                 :             /* reset minfreq to the smallest frequency we're keeping */
     506              15 :             minfreq = sort_table[num_mcelem - 1]->frequency;
     507                 :         }
     508                 :         else
     509            1688 :             num_mcelem = track_len;
     510                 : 
     511                 :         /* Generate MCELEM slot entry */
     512            1703 :         if (num_mcelem > 0)
     513                 :         {
     514                 :             MemoryContext old_context;
     515                 :             Datum      *mcelem_values;
     516                 :             float4     *mcelem_freqs;
     517                 : 
     518                 :             /*
     519                 :              * We want to store statistics sorted on the element value using
     520                 :              * the element type's default comparison function.  This permits
     521                 :              * fast binary searches in selectivity estimation functions.
     522                 :              */
     523            1703 :             qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *),
     524                 :                                 trackitem_compare_element, NULL);
     525                 : 
     526                 :             /* Must copy the target values into anl_context */
     527            1703 :             old_context = MemoryContextSwitchTo(stats->anl_context);
     528                 : 
     529                 :             /*
     530                 :              * We sorted statistics on the element value, but we want to be
     531                 :              * able to find the minimal and maximal frequencies without going
     532                 :              * through all the values.  We also want the frequency of null
     533                 :              * elements.  Store these three values at the end of mcelem_freqs.
     534                 :              */
     535            1703 :             mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
     536            1703 :             mcelem_freqs = (float4 *) palloc((num_mcelem + 3) * sizeof(float4));
     537                 : 
     538                 :             /*
     539                 :              * See comments above about use of nonnull_cnt as the divisor for
     540                 :              * the final frequency estimates.
     541                 :              */
     542          170684 :             for (i = 0; i < num_mcelem; i++)
     543                 :             {
     544 GNC      168981 :                 TrackItem  *titem = sort_table[i];
     545                 : 
     546          337962 :                 mcelem_values[i] = datumCopy(titem->key,
     547 CBC      168981 :                                              extra_data->typbyval,
     548          168981 :                                              extra_data->typlen);
     549 GNC      168981 :                 mcelem_freqs[i] = (double) titem->frequency /
     550 CBC      168981 :                     (double) nonnull_cnt;
     551                 :             }
     552            1703 :             mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
     553            1703 :             mcelem_freqs[i++] = (double) maxfreq / (double) nonnull_cnt;
     554            1703 :             mcelem_freqs[i++] = (double) null_elem_cnt / (double) nonnull_cnt;
     555                 : 
     556            1703 :             MemoryContextSwitchTo(old_context);
     557                 : 
     558            1703 :             stats->stakind[slot_idx] = STATISTIC_KIND_MCELEM;
     559            1703 :             stats->staop[slot_idx] = extra_data->eq_opr;
     560            1703 :             stats->stacoll[slot_idx] = extra_data->coll_id;
     561            1703 :             stats->stanumbers[slot_idx] = mcelem_freqs;
     562                 :             /* See above comment about extra stanumber entries */
     563            1703 :             stats->numnumbers[slot_idx] = num_mcelem + 3;
     564            1703 :             stats->stavalues[slot_idx] = mcelem_values;
     565            1703 :             stats->numvalues[slot_idx] = num_mcelem;
     566                 :             /* We are storing values of element type */
     567            1703 :             stats->statypid[slot_idx] = extra_data->type_id;
     568            1703 :             stats->statyplen[slot_idx] = extra_data->typlen;
     569            1703 :             stats->statypbyval[slot_idx] = extra_data->typbyval;
     570            1703 :             stats->statypalign[slot_idx] = extra_data->typalign;
     571            1703 :             slot_idx++;
     572                 :         }
     573                 : 
     574                 :         /* Generate DECHIST slot entry */
     575            1703 :         count_items_count = hash_get_num_entries(count_tab);
     576            1703 :         if (count_items_count > 0)
     577                 :         {
     578            1703 :             int         num_hist = stats->attr->attstattarget;
     579                 :             DECountItem **sorted_count_items;
     580                 :             int         j;
     581                 :             int         delta;
     582                 :             int64       frac;
     583                 :             float4     *hist;
     584                 : 
     585                 :             /* num_hist must be at least 2 for the loop below to work */
     586            1703 :             num_hist = Max(num_hist, 2);
     587                 : 
     588                 :             /*
     589                 :              * Create an array of DECountItem pointers, and sort them into
     590                 :              * increasing count order.
     591                 :              */
     592                 :             sorted_count_items = (DECountItem **)
     593            1703 :                 palloc(sizeof(DECountItem *) * count_items_count);
     594            1703 :             hash_seq_init(&scan_status, count_tab);
     595            1703 :             j = 0;
     596           13368 :             while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL)
     597                 :             {
     598           11665 :                 sorted_count_items[j++] = count_item;
     599                 :             }
     600            1703 :             qsort_interruptible(sorted_count_items, count_items_count,
     601                 :                                 sizeof(DECountItem *),
     602                 :                                 countitem_compare_count, NULL);
     603                 : 
     604                 :             /*
     605                 :              * Prepare to fill stanumbers with the histogram, followed by the
     606                 :              * average count.  This array must be stored in anl_context.
     607                 :              */
     608                 :             hist = (float4 *)
     609            1703 :                 MemoryContextAlloc(stats->anl_context,
     610            1703 :                                    sizeof(float4) * (num_hist + 1));
     611            1703 :             hist[num_hist] = (double) element_no / (double) nonnull_cnt;
     612                 : 
     613                 :             /*----------
     614                 :              * Construct the histogram of distinct-element counts (DECs).
     615                 :              *
     616                 :              * The object of this loop is to copy the min and max DECs to
     617                 :              * hist[0] and hist[num_hist - 1], along with evenly-spaced DECs
     618                 :              * in between (where "evenly-spaced" is with reference to the
     619                 :              * whole input population of arrays).  If we had a complete sorted
     620                 :              * array of DECs, one per analyzed row, the i'th hist value would
     621                 :              * come from DECs[i * (analyzed_rows - 1) / (num_hist - 1)]
     622                 :              * (compare the histogram-making loop in compute_scalar_stats()).
     623                 :              * But instead of that we have the sorted_count_items[] array,
     624                 :              * which holds unique DEC values with their frequencies (that is,
     625                 :              * a run-length-compressed version of the full array).  So we
     626                 :              * control advancing through sorted_count_items[] with the
     627                 :              * variable "frac", which is defined as (x - y) * (num_hist - 1),
     628                 :              * where x is the index in the notional DECs array corresponding
     629                 :              * to the start of the next sorted_count_items[] element's run,
     630                 :              * and y is the index in DECs from which we should take the next
     631                 :              * histogram value.  We have to advance whenever x <= y, that is
     632                 :              * frac <= 0.  The x component is the sum of the frequencies seen
     633                 :              * so far (up through the current sorted_count_items[] element),
     634                 :              * and of course y * (num_hist - 1) = i * (analyzed_rows - 1),
     635                 :              * per the subscript calculation above.  (The subscript calculation
     636                 :              * implies dropping any fractional part of y; in this formulation
     637                 :              * that's handled by not advancing until frac reaches 1.)
     638                 :              *
     639                 :              * Even though frac has a bounded range, it could overflow int32
     640                 :              * when working with very large statistics targets, so we do that
     641                 :              * math in int64.
     642                 :              *----------
     643                 :              */
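            /*
             * Worked example (illustrative, not taken from this run): with
             * analyzed_rows = 5, num_hist = 3, and sorted_count_items =
             * {count 2, freq 3}, {count 5, freq 2}, the notional DECs array
             * is [2, 2, 2, 5, 5] and we want hist[i] = DECs[i * 4 / 2],
             * i.e. [2, 2, 5].  The loop below starts with delta = 4 and
             * frac = 3 * (3 - 1) = 6, then:
             *   i = 0: frac = 6 > 0, emit hist[0] = 2, frac becomes 2
             *   i = 1: frac = 2 > 0, emit hist[1] = 2, frac becomes -2
             *   i = 2: frac <= 0, so advance j to 1 and add 2 * 2, making
             *          frac = 2; emit hist[2] = 5, frac becomes -2
             * which ends with j == count_items_count - 1, as the Assert
             * below expects.
             */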
     644            1703 :             delta = analyzed_rows - 1;
     645            1703 :             j = 0;              /* current index in sorted_count_items */
     646                 :             /* Initialize frac for sorted_count_items[0]; y is initially 0 */
     647            1703 :             frac = (int64) sorted_count_items[0]->frequency * (num_hist - 1);
     648          169183 :             for (i = 0; i < num_hist; i++)
     649                 :             {
     650          177442 :                 while (frac <= 0)
     651                 :                 {
     652                 :                     /* Advance, and update x component of frac */
     653            9962 :                     j++;
     654            9962 :                     frac += (int64) sorted_count_items[j]->frequency * (num_hist - 1);
     655                 :                 }
     656          167480 :                 hist[i] = sorted_count_items[j]->count;
     657          167480 :                 frac -= delta;  /* update y for upcoming i increment */
     658                 :             }
     659            1703 :             Assert(j == count_items_count - 1);
     660                 : 
     661            1703 :             stats->stakind[slot_idx] = STATISTIC_KIND_DECHIST;
     662            1703 :             stats->staop[slot_idx] = extra_data->eq_opr;
     663            1703 :             stats->stacoll[slot_idx] = extra_data->coll_id;
     664            1703 :             stats->stanumbers[slot_idx] = hist;
     665            1703 :             stats->numnumbers[slot_idx] = num_hist + 1;
     666            1703 :             slot_idx++;
     667                 :         }
     668                 :     }
     669                 : 
     670                 :     /*
     671                 :      * We don't need to bother cleaning up any of our temporary palloc's. The
     672                 :      * hashtable should also go away, as it used a child memory context.
     673                 :      */
     674            5965 : }
     675                 : 
     676                 : /*
     677                 :  * A function to prune the D structure from the Lossy Counting algorithm.
     678                 :  * Consult compute_tsvector_stats() for wider explanation.
     679                 :  */
     680                 : static void
     681 UBC           0 : prune_element_hashtable(HTAB *elements_tab, int b_current)
     682                 : {
     683                 :     HASH_SEQ_STATUS scan_status;
     684                 :     TrackItem  *item;
     685                 : 
     686               0 :     hash_seq_init(&scan_status, elements_tab);
     687               0 :     while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
     688                 :     {
     689               0 :         if (item->frequency + item->delta <= b_current)
     690                 :         {
     691               0 :             Datum       value = item->key;
     692                 : 
     693 UNC           0 :             if (hash_search(elements_tab, &item->key,
     694                 :                             HASH_REMOVE, NULL) == NULL)
     695 UBC           0 :                 elog(ERROR, "hash table corrupted");
     696                 :             /* We should free memory if element is not passed by value */
     697               0 :             if (!array_extra_data->typbyval)
     698               0 :                 pfree(DatumGetPointer(value));
     699                 :         }
     700                 :     }
     701               0 : }
     702                 : 
     703                 : /*
     704                 :  * Hash function for elements.
     705                 :  *
     706                 :  * We use the element type's default hash opclass, and the column collation
     707                 :  * if the type is collation-sensitive.
     708                 :  */
     709                 : static uint32
     710 CBC     1332765 : element_hash(const void *key, Size keysize)
     711                 : {
     712         1332765 :     Datum       d = *((const Datum *) key);
     713                 :     Datum       h;
     714                 : 
     715         1332765 :     h = FunctionCall1Coll(array_extra_data->hash,
     716         1332765 :                           array_extra_data->coll_id,
     717                 :                           d);
     718         1332765 :     return DatumGetUInt32(h);
     719                 : }
     720                 : 
     721                 : /*
     722                 :  * Matching function for elements, to be used in hashtable lookups.
     723                 :  */
     724                 : static int
     725         1049574 : element_match(const void *key1, const void *key2, Size keysize)
     726                 : {
     727                 :     /* The keysize parameter is superfluous here */
     728         1049574 :     return element_compare(key1, key2);
     729                 : }
     730                 : 
     731                 : /*
     732                 :  * Comparison function for elements.
     733                 :  *
     734                 :  * We use the element type's default btree opclass, and the column collation
     735                 :  * if the type is collation-sensitive.
     736                 :  *
     737                 :  * XXX consider using SortSupport infrastructure
     738                 :  */
     739                 : static int
     740         2458703 : element_compare(const void *key1, const void *key2)
     741                 : {
     742         2458703 :     Datum       d1 = *((const Datum *) key1);
     743         2458703 :     Datum       d2 = *((const Datum *) key2);
     744                 :     Datum       c;
     745                 : 
     746         2458703 :     c = FunctionCall2Coll(array_extra_data->cmp,
     747         2458703 :                           array_extra_data->coll_id,
     748                 :                           d1, d2);
     749         2458703 :     return DatumGetInt32(c);
     750                 : }
     751                 : 
     752                 : /*
     753                 :  * Comparator for sorting TrackItems by frequencies (descending sort)
     754                 :  */
     755                 : static int
     756           17346 : trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg)
     757                 : {
     758           17346 :     const TrackItem *const *t1 = (const TrackItem *const *) e1;
     759           17346 :     const TrackItem *const *t2 = (const TrackItem *const *) e2;
     760                 : 
     761           17346 :     return (*t2)->frequency - (*t1)->frequency;
     762                 : }
     763                 : 
     764                 : /*
     765                 :  * Comparator for sorting TrackItems by element values
     766                 :  */
     767                 : static int
     768         1409129 : trackitem_compare_element(const void *e1, const void *e2, void *arg)
     769                 : {
     770         1409129 :     const TrackItem *const *t1 = (const TrackItem *const *) e1;
     771         1409129 :     const TrackItem *const *t2 = (const TrackItem *const *) e2;
     772                 : 
     773         1409129 :     return element_compare(&(*t1)->key, &(*t2)->key);
     774                 : }
     775                 : 
     776                 : /*
     777                 :  * Comparator for sorting DECountItems by count
     778                 :  */
     779                 : static int
     780           31098 : countitem_compare_count(const void *e1, const void *e2, void *arg)
     781                 : {
     782           31098 :     const DECountItem *const *t1 = (const DECountItem *const *) e1;
     783           31098 :     const DECountItem *const *t2 = (const DECountItem *const *) e2;
     784                 : 
     785           31098 :     if ((*t1)->count < (*t2)->count)
     786           14217 :         return -1;
     787           16881 :     else if ((*t1)->count == (*t2)->count)
     788 UBC           0 :         return 0;
     789                 :     else
     790 CBC       16881 :         return 1;
     791                 : }
        

Generated by: LCOV version v1.16-55-g56c0a2a