-
Notifications
You must be signed in to change notification settings - Fork 61
/
Copy pathESTIMATION_FUNS.R
4540 lines (3714 loc) · 178 KB
/
ESTIMATION_FUNS.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#----------------------------------------------#
# Author: Laurent Berge
# Date creation: Tue Apr 23 16:41:47 2019
# Purpose: All estimation functions
#----------------------------------------------#
#' Fixed-effects OLS estimation
#'
#' Estimates OLS with any number of fixed-effects.
#'
#' @inheritParams femlm
#' @inheritSection xpd Dot square bracket operator in formulas
#'
#' @param fml A formula representing the relation to be estimated. For example: `fml = z~x+y`. To include fixed-effects, insert them in this formula using a pipe: e.g. `fml = z~x+y | fe_1+fe_2`. You can combine two fixed-effects with `^`: e.g. `fml = z~x+y|fe_1^fe_2`, see details. You can also use variables with varying slopes using square brackets: e.g. in `fml = z~y|fe_1[x] + fe_2`, see details. To add IVs, insert the endogenous vars./instruments after a pipe, like in `y ~ x | x_endo1 + x_endo2 ~ x_inst1 + x_inst2`. Note that it should always be the last element, see details. Multiple estimations can be performed at once: for multiple dep. vars, wrap them in `c()`: ex `c(y1, y2)`. For multiple indep. vars, use the stepwise functions: ex `x1 + csw(x2, x3)`. The formula `fml = c(y1, y2) ~ x1 + csw0(x2, x3)` leads to 6 estimations, see details. Square brackets starting with a dot can be used to call global variables: `y.[i] ~ x.[1:2]` will lead to `y3 ~ x1 + x2` if `i` is equal to 3 in the current environment (see details in [`xpd`]).
#' @param weights A formula or a numeric vector. Each observation can be weighted, the weights must be greater than 0. If equal to a formula, it should be one-sided: for example `~ var_weight`.
#' @param verbose Integer. Higher values give more information. In particular, it can detail the number of iterations in the demeaning algorithm (the first number is the left-hand-side, the other numbers are the right-hand-side variables).
#' @param demeaned Logical, default is `FALSE`. Only used in the presence of fixed-effects: should the centered variables be returned? If `TRUE`, it creates the items `y_demeaned` and `X_demeaned`.
#' @param notes Logical. By default, two notes are displayed: when NAs are removed (to show additional information) and when some observations are removed because of collinearity. To avoid displaying these messages, you can set `notes = FALSE`. You can remove these messages permanently by using `setFixest_notes(FALSE)`.
#' @param collin.tol Numeric scalar, default is `1e-10`. Threshold deciding when variables should be considered collinear and subsequently removed from the estimation. Higher values means more variables will be removed (if there is presence of collinearity). One signal of presence of collinearity is t-stats that are extremely low (for instance when t-stats < 1e-3).
#' @param y Numeric vector/matrix/data.frame of the dependent variable(s). Multiple dependent variables will return a `fixest_multi` object.
#' @param X Numeric matrix of the regressors.
#' @param fixef_df Matrix/data.frame of the fixed-effects.
#'
#' @details
#' The method used to demean each variable along the fixed-effects is based on Berge (2018), since this is the same problem to solve as for the Gaussian case in a ML setup.
#'
#' @section Combining the fixed-effects:
#' You can combine two variables to make it a new fixed-effect using `^`. The syntax is as follows: `fe_1^fe_2`. Here you created a new variable which is the combination of the two variables fe_1 and fe_2. This is identical to doing `paste0(fe_1, "_", fe_2)` but more convenient.
#'
#' Note that pasting is a costly operation, especially for large data sets. Thus, the internal algorithm uses a numerical trick which is fast, but the drawback is that the identity of each observation is lost (i.e. they are now equal to a meaningless number instead of being equal to `paste0(fe_1, "_", fe_2)`). These \dQuote{identities} are useful only if you're interested in the value of the fixed-effects (that you can extract with [`fixef.fixest`]). If you're only interested in coefficients of the variables, it doesn't matter. Anyway, you can use `combine.quick = FALSE` to tell the internal algorithm to use `paste` instead of the numerical trick. By default, the numerical trick is performed only for large data sets.
#'
#' @section Varying slopes:
#' You can add variables with varying slopes in the fixed-effect part of the formula. The syntax is as follows: `fixef_var[var1, var2]`. Here the variables var1 and var2 will be with varying slopes (one slope per value in fixef_var) and the fixed-effect fixef_var will also be added.
#'
#' To add only the variables with varying slopes and not the fixed-effect, use double square brackets: `fixef_var[[var1, var2]]`.
#'
#' In other words:
#' \itemize{
#' \item `fixef_var[var1, var2]` is equivalent to `fixef_var + fixef_var[[var1]] + fixef_var[[var2]]`
#' \item `fixef_var[[var1, var2]]` is equivalent to `fixef_var[[var1]] + fixef_var[[var2]]`
#' }
#'
#' In general, for convergence reasons, it is recommended to always add the fixed-effect and avoid using only the variable with varying slope (i.e. use single square brackets).
#'
#' @section Lagging variables:
#'
#' To use leads/lags of variables in the estimation, you can either: i) provide the argument `panel.id`, or ii) set your data set as a panel with the function [`panel`]; you can then use the lag/lead functions [`l`], [`f`][fixest::l] and [`d`][fixest::l].
#'
#' You can provide several leads/lags/differences at once: e.g. if your formula is equal to `f(y) ~ l(x1, -1:1)`, it means that the dependent variable is equal to the lead of `y`, and you will have as explanatory variables the lead of `x1`, `x1` and the lag of `x1`. See the examples in function [`l`] for more details.
#'
#' @section Interactions:
#'
#' You can interact a numeric variable with a "factor-like" variable by using `i(factor_var, continuous_var, ref)`, where `continuous_var` will be interacted with each value of `factor_var` and the argument `ref` is a value of `factor_var` taken as a reference (optional).
#'
#' Using this specific way to create interactions leads to a different display of the interacted values in [`etable`]. See examples.
#'
#' It is important to note that *if you do not care about the standard-errors of the interactions*, then you can add interactions in the fixed-effects part of the formula, it will be incomparably faster (using the syntax `factor_var[continuous_var]`, as explained in the section \dQuote{Varying slopes}).
#'
#' The function [`i`] has in fact more arguments, please see details in its associated help page.
#'
#' @section On standard-errors:
#'
#' Standard-errors can be computed in different ways, you can use the arguments `se` and `ssc` in [`summary.fixest`] to define how to compute them. By default, in the presence of fixed-effects, standard-errors are automatically clustered.
#'
#' The following vignette: [On standard-errors](https://lrberge.github.io/fixest/articles/standard_errors.html) describes in details how the standard-errors are computed in `fixest` and how you can replicate standard-errors from other software.
#'
#' You can use the functions [`setFixest_vcov`] and [`setFixest_ssc`][fixest::ssc] to permanently set the way the standard-errors are computed.
#'
#' @section Instrumental variables:
#'
#' To estimate two stage least square regressions, insert the relationship between the endogenous regressor(s) and the instruments in a formula, after a pipe.
#'
#' For example, `fml = y ~ x1 | x_endo ~ x_inst` will use the variables `x1` and `x_inst` in the first stage to explain `x_endo`. Then will use the fitted value of `x_endo` (which will be named `fit_x_endo`) and `x1` to explain `y`.
#' To include several endogenous regressors, just use "+", like in: `fml = y ~ x1 | x_endo1 + x_endo2 ~ x_inst1 + x_inst2`.
#'
#' Of course you can still add the fixed-effects, but the IV formula must always come last, like in `fml = y ~ x1 | fe1 + fe2 | x_endo ~ x_inst`.
#'
#' If you want to estimate a model without exogenous variables, use `"1"` as a placeholder: e.g. `fml = y ~ 1 | x_endo ~ x_inst`.
#'
#' By default, the second stage regression is returned. You can access the first stage(s) regressions either directly in the slot `iv_first_stage` (not recommended), or using the argument `stage = 1` from the function [`summary.fixest`]. For example `summary(iv_est, stage = 1)` will give the first stage(s). Note that using summary you can display both the second and first stages at the same time using, e.g., `stage = 1:2` (using `2:1` would reverse the order).
#'
#'
#' @section Multiple estimations:
#'
#' Multiple estimations can be performed at once, they just have to be specified in the formula. Multiple estimations yield a `fixest_multi` object which is \sQuote{kind of} a list of all the results but includes specific methods to access the results in a handy way. Please have a look at the dedicated vignette: [Multiple estimations](https://lrberge.github.io/fixest/articles/multiple_estimations.html).
#'
#' To include multiple dependent variables, wrap them in `c()` (`list()` also works). For instance `fml = c(y1, y2) ~ x1` would estimate the model `fml = y1 ~ x1` and then the model `fml = y2 ~ x1`.
#'
#' To include multiple independent variables, you need to use the stepwise functions. There are 4 stepwise functions: `sw`, `sw0`, `csw`, `csw0`, and `mvsw`. Of course `sw` stands for stepwise, and `csw` for cumulative stepwise. Finally `mvsw` is a bit special, it stands for multiverse stepwise. Let's explain that.
#' Assume you have the following formula: `fml = y ~ x1 + sw(x2, x3)`. The stepwise function `sw` will estimate the following two models: `y ~ x1 + x2` and `y ~ x1 + x3`. That is, each element in `sw()` is sequentially, and separately, added to the formula. Had you used `sw0` in lieu of `sw`, then the model `y ~ x1` would also have been estimated. The `0` in the name means that the model without any stepwise element also needs to be estimated.
#' The prefix `c` means cumulative: each stepwise element is added to the next. That is, `fml = y ~ x1 + csw(x2, x3)` would lead to the following models `y ~ x1 + x2` and `y ~ x1 + x2 + x3`. The `0` has the same meaning and would also lead to the model without the stepwise elements to be estimated: in other words, `fml = y ~ x1 + csw0(x2, x3)` leads to the following three models: `y ~ x1`, `y ~ x1 + x2` and `y ~ x1 + x2 + x3`.
#' Finally `mvsw` will add, in a stepwise fashion all possible combinations of the variables in its arguments. For example `mvsw(x1, x2, x3)` is equivalent to `sw0(x1, x2, x3, x1 + x2, x1 + x3, x2 + x3, x1 + x2 + x3)`. The number of models to estimate grows at a factorial rate: so be cautious!
#'
#' Multiple independent variables can be combined with multiple dependent variables, as in `fml = c(y1, y2) ~ csw(x1, x2, x3)` which would lead to 6 estimations. Multiple estimations can also be combined to split samples (with the arguments `split`, `fsplit`).
#'
#' You can also add fixed-effects in a stepwise fashion. Note that you cannot perform stepwise estimations on the IV part of the formula (`feols` only).
#'
#' If NAs are present in the sample, to avoid too many messages, only NA removal concerning the variables common to all estimations is reported.
#'
#' A note on performance. The feature of multiple estimations has been highly optimized for `feols`, in particular in the presence of fixed-effects. It is faster to estimate multiple models using the formula rather than with a loop. For non-`feols` models using the formula is roughly similar to using a loop performance-wise.
#'
#' @section Tricks to estimate multiple LHS:
#'
#' To use multiple dependent variables in `fixest` estimations, you need to include them in a vector: like in `c(y1, y2, y3)`.
#'
#' First, if names are stored in a vector, they can readily be inserted in a formula to perform multiple estimations using the dot square bracket operator. For instance if `my_lhs = c("y1", "y2")`, calling `fixest` with, say `feols(.[my_lhs] ~ x1, etc)` is equivalent to using `feols(c(y1, y2) ~ x1, etc)`. Beware that this is a special feature unique to the *left-hand-side* of `fixest` estimations (the default behavior of the DSB operator is to aggregate with sums, see [`xpd`]).
#'
#' Second, you can use a regular expression to grep the left-hand-sides on the fly. When the `..("regex")` feature is used naked on the LHS, the variables grepped are inserted into `c()`. For example `..("Pe") ~ Sepal.Length, iris` is equivalent to `c(Petal.Length, Petal.Width) ~ Sepal.Length, iris`. Beware that this is a special feature unique to the *left-hand-side* of `fixest` estimations (the default behavior of `..("regex")` is to aggregate with sums, see [`xpd`]).
#'
#' @section Argument sliding:
#'
#' When the data set has been set up globally using [`setFixest_estimation`]`(data = data_set)`, the argument `vcov` can be used implicitly. This means that calls such as `feols(y ~ x, "HC1")`, or `feols(y ~ x, ~id)`, are valid: i) the data is automatically deduced from the global settings, and ii) the `vcov` is deduced to be the second argument.
#'
#' @section Piping:
#'
#' Although the argument 'data' is placed in second position, the data can be piped to the estimation functions. For example, with R >= 4.1, `mtcars |> feols(mpg ~ cyl)` works as `feols(mpg ~ cyl, mtcars)`.
#'
#'
#' @return
#' A `fixest` object. Note that `fixest` objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. [`vcov.fixest`], [`resid.fixest`], etc) or functions (like for instance [`fitstat`] to access any fit statistic).
#' \item{nobs}{The number of observations.}
#' \item{fml}{The linear formula of the call.}
#' \item{call}{The call of the function.}
#' \item{method}{The method used to estimate the model.}
#' \item{family}{The family used to estimate the model.}
#' \item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then depending on the cases: `fixef`: the fixed-effects, `iv`: the IV part of the formula.}
#' \item{fixef_vars}{The names of each fixed-effect dimension.}
#' \item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
#' \item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
#' \item{coefficients}{The named vector of estimated coefficients.}
#' \item{multicol}{Logical, if multicollinearity was found.}
#' \item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
#' \item{loglik}{The loglikelihood.}
#' \item{ssr_null}{Sum of the squared residuals of the null model (containing only the intercept).}
#' \item{ssr_fe_only}{Sum of the squared residuals of the model estimated with fixed-effects only.}
#' \item{ll_null}{The log-likelihood of the null model (containing only the intercept).}
#' \item{ll_fe_only}{The log-likelihood of the model estimated with fixed-effects only.}
#' \item{fitted.values}{The fitted values.}
#' \item{linear.predictors}{The linear predictors.}
#' \item{residuals}{The residuals (y minus the fitted values).}
#' \item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
#' \item{hessian}{The Hessian of the parameters.}
#' \item{cov.iid}{The variance-covariance matrix of the parameters.}
#' \item{se}{The standard-error of the parameters.}
#' \item{scores}{The matrix of the scores (first derivative for each observation).}
#' \item{residuals}{The difference between the dependent variable and the expected predictor.}
#' \item{sumFE}{The sum of the fixed-effects coefficients for each observation.}
#' \item{offset}{(When relevant.) The offset formula.}
#' \item{weights}{(When relevant.) The weights formula.}
#' \item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
#' \item{collin.var}{(When relevant.) Vector containing the variables removed because of collinearity.}
#' \item{collin.coef}{(When relevant.) Vector of coefficients, where the values of the variables removed because of collinearity are NA.}
#' \item{collin.min_norm}{The minimal diagonal value of the Cholesky decomposition. Small values indicate the possible presence of collinearity.}
#' \item{y_demeaned}{Only when `demeaned = TRUE`: the centered dependent variable.}
#' \item{X_demeaned}{Only when `demeaned = TRUE`: the centered explanatory variable.}
#'
#'
#' @seealso
#' See also [`summary.fixest`] to see the results with the appropriate standard-errors, [`fixef.fixest`] to extract the fixed-effects coefficients, and the function [`etable`] to visualize the results of multiple estimations. For plotting coefficients: see [`coefplot`].
#'
#' And other estimation methods: [`femlm`], [`feglm`], [`fepois`], [`fenegbin`], [`feNmlm`].
#'
#' @author
#' Laurent Berge
#'
#' @references
#'
#' Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 ([](https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13)).
#'
#' For models with multiple fixed-effects:
#'
#' Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
#'
#' @examples
#'
#' #
#' # Basic estimation
#' #
#'
#' res = feols(Sepal.Length ~ Sepal.Width + Petal.Length, iris)
#' # You can specify clustered standard-errors in summary:
#' summary(res, cluster = ~Species)
#'
#' #
#' # Just one set of fixed-effects:
#' #
#'
#' res = feols(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris)
#' # By default, the SEs are clustered according to the first fixed-effect
#' summary(res)
#'
#' #
#' # Varying slopes:
#' #
#'
#' res = feols(Sepal.Length ~ Petal.Length | Species[Sepal.Width], iris)
#' summary(res)
#'
#' #
#' # Combining the FEs:
#' #
#'
#' base = iris
#' base$fe_2 = rep(1:10, 15)
#' res_comb = feols(Sepal.Length ~ Petal.Length | Species^fe_2, base)
#' summary(res_comb)
#' fixef(res_comb)[[1]]
#'
#' #
#' # Using leads/lags:
#' #
#'
#' data(base_did)
#' # We need to set up the panel with the arg. panel.id
#' est1 = feols(y ~ l(x1, 0:1), base_did, panel.id = ~id+period)
#' est2 = feols(f(y) ~ l(x1, -1:1), base_did, panel.id = ~id+period)
#' etable(est1, est2, order = "f", drop="Int")
#'
#' #
#' # Using interactions:
#' #
#'
#' data(base_did)
#' # We interact the variable 'period' with the variable 'treat'
#' est_did = feols(y ~ x1 + i(period, treat, 5) | id+period, base_did)
#'
#' # Now we can plot the result of the interaction with coefplot
#' coefplot(est_did)
#' # You have many more example in coefplot help
#'
#' #
#' # Instrumental variables
#' #
#'
#' # To estimate Two stage least squares,
#' # insert a formula describing the endo. vars./instr. relation after a pipe:
#'
#' base = iris
#' names(base) = c("y", "x1", "x2", "x3", "fe1")
#' base$x_inst1 = 0.2 * base$x1 + 0.7 * base$x2 + rpois(150, 2)
#' base$x_inst2 = 0.2 * base$x2 + 0.7 * base$x3 + rpois(150, 3)
#' base$x_endo1 = 0.5 * base$y + 0.5 * base$x3 + rnorm(150, sd = 2)
#' base$x_endo2 = 1.5 * base$y + 0.5 * base$x3 + 3 * base$x_inst1 + rnorm(150, sd = 5)
#'
#' # Using 2 controls, 1 endogenous var. and 1 instrument
#' res_iv = feols(y ~ x1 + x2 | x_endo1 ~ x_inst1, base)
#'
#' # The second stage is the default
#' summary(res_iv)
#'
#' # To show the first stage:
#' summary(res_iv, stage = 1)
#'
#' # To show both the first and second stages:
#' summary(res_iv, stage = 1:2)
#'
#' # Adding a fixed-effect => IV formula always last!
#' res_iv_fe = feols(y ~ x1 + x2 | fe1 | x_endo1 ~ x_inst1, base)
#'
#' # With two endogenous regressors
#' res_iv2 = feols(y ~ x1 + x2 | x_endo1 + x_endo2 ~ x_inst1 + x_inst2, base)
#'
#' # Now there's two first stages => a fixest_multi object is returned
#' sum_res_iv2 = summary(res_iv2, stage = 1)
#'
#' # You can navigate through it by subsetting:
#' sum_res_iv2[iv = 1]
#'
#' # The stage argument also works in etable:
#' etable(res_iv, res_iv_fe, res_iv2, order = "endo")
#'
#' etable(res_iv, res_iv_fe, res_iv2, stage = 1:2, order = c("endo", "inst"),
#' group = list(control = "!endo|inst"))
#'
#' #
#' # Multiple estimations:
#' #
#'
#' # 6 estimations
#' est_mult = feols(c(Ozone, Solar.R) ~ Wind + Temp + csw0(Wind:Temp, Day), airquality)
#'
#' # We can display the results for the first lhs:
#' etable(est_mult[lhs = 1])
#'
#' # And now the second (access can be made by name)
#' etable(est_mult[lhs = "Solar.R"])
#'
#' # Now we focus on the two last right hand sides
#' # (note that .N can be used to specify the last item)
#' etable(est_mult[rhs = 2:.N])
#'
#' # Combining with split
#' est_split = feols(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
#' airquality, split = ~ Month)
#'
#' # You can display everything at once with the print method
#' est_split
#'
#' # Different way of displaying the results with "compact"
#' summary(est_split, "compact")
#'
#' # You can still select which sample/LHS/RHS to display
#' est_split[sample = 1:2, lhs = 1, rhs = 1]
#'
#' #
#' # Split sample estimations
#' #
#'
#' base = setNames(iris, c("y", "x1", "x2", "x3", "species"))
#'
#' est = feols(y ~ x.[1:3], base, split = ~species)
#' etable(est)
#'
#' # You can select specific values with the %keep% and %drop% operators
#' # By default, partial matching is enabled. It should refer to a single variable.
#' est = feols(y ~ x.[1:3], base, split = ~species %keep% c("set", "vers"))
#' etable(est)
#'
#' # You can supply regular expression by using an @ first.
#' # regex can match several values.
#' est = feols(y ~ x.[1:3], base, split = ~species %keep% c("@set|vers"))
#' etable(est)
#'
#' #
#' # Argument sliding
#' #
#'
#' # When the data set is set up globally, you can use the vcov argument implicitly
#'
#' base = setNames(iris, c("y", "x1", "x2", "x3", "species"))
#'
#' no_sliding = feols(y ~ x1 + x2, base, ~species)
#'
#' # With sliding
#' setFixest_estimation(data = base)
#'
#' # ~species is implicitly deduced to be equal to 'vcov'
#' sliding = feols(y ~ x1 + x2, ~species)
#'
#' etable(no_sliding, sliding)
#'
#' # Resetting the global options
#' setFixest_estimation(data = NULL)
#'
#'
#' #
#' # Formula expansions
#' #
#'
#' # By default, the features of the xpd function are enabled in
#' # all fixest estimations
#' # Here's a few examples
#'
#' base = setNames(iris, c("y", "x1", "x2", "x3", "species"))
#'
#' # dot square bracket operator
#' feols(y ~ x.[1:3], base)
#'
#' # fetching variables via regular expressions: ..("regex")
#' feols(y ~ ..("1|2"), base)
#'
#' # NOTA: it also works for multiple LHS
#' mult1 = feols(x.[1:2] ~ y + species, base)
#' mult2 = feols(..("y|3") ~ x.[1:2] + species, base)
#' etable(mult1, mult2)
#'
#'
#' # Use .[, stuff] to include variables in functions:
#' feols(y ~ csw(x.[, 1:3]), base)
#'
#' # Same for ..(, "regex")
#' feols(y ~ csw(..(,"x")), base)
#'
#'
#'
feols = function(fml, data, vcov, weights, offset, subset, split, fsplit, split.keep, split.drop,
cluster, se,
ssc, panel.id, fixef, fixef.rm = "none", fixef.tol = 1e-6,
fixef.iter = 10000, collin.tol = 1e-10, nthreads = getFixest_nthreads(),
lean = FALSE, verbose = 0, warn = TRUE, notes = getFixest_notes(),
only.coef = FALSE,
combine.quick, demeaned = FALSE, mem.clean = FALSE, only.env = FALSE, env, ...){
dots = list(...)
# 1st: is the call coming from feglm?
fromGLM = FALSE
skip_fixef = FALSE
if("fromGLM" %in% names(dots)){
fromGLM = TRUE
# env is provided by feglm
X = dots$X
y = as.vector(dots$y)
init = dots$means
correct_0w = dots$correct_0w
only.coef = FALSE
# IN_MULTI is only used to trigger notes, this happens only within feglm
IN_MULTI = FALSE
if(verbose){
# I can't really mutualize these three lines of code since the verbose
# needs to be checked before using it, and here it's an internal call
time_start = proc.time()
gt = function(x, nl = TRUE) cat(sfill(x, 20), ": ", -(t0 - (t0<<-proc.time()))[3], "s", ifelse(nl, "\n", ""), sep = "")
t0 = proc.time()
}
} else {
time_start = proc.time()
gt = function(x, nl = TRUE) cat(sfill(x, 20), ": ", -(t0 - (t0<<-proc.time()))[3], "s", ifelse(nl, "\n", ""), sep = "")
t0 = proc.time()
# we use fixest_env for appropriate controls and data handling
if(missing(env)){
set_defaults("fixest_estimation")
call_env = new.env(parent = parent.frame())
env = try(fixest_env(fml = fml, data = data, weights = weights, offset = offset,
subset = subset, split = split, fsplit = fsplit,
split.keep = split.keep, split.drop = split.drop,
vcov = vcov, cluster = cluster, se = se, ssc = ssc,
panel.id = panel.id, fixef = fixef, fixef.rm = fixef.rm,
fixef.tol = fixef.tol, fixef.iter = fixef.iter, collin.tol = collin.tol,
nthreads = nthreads, lean = lean, verbose = verbose, warn = warn,
notes = notes, only.coef = only.coef, combine.quick = combine.quick, demeaned = demeaned,
mem.clean = mem.clean, origin = "feols", mc_origin = match.call(),
call_env = call_env, ...), silent = TRUE)
} else if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)) {
stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", ifelse(r, "an", "a 'fixest'"), " environment.")
}
if("try-error" %in% class(env)){
stop(format_error_msg(env, "feols"))
}
check_arg(only.env, "logical scalar")
if(only.env){
return(env)
}
y = get("lhs", env)
X = get("linear.mat", env)
nthreads = get("nthreads", env)
init = 0
# demeaned variables
if(!is.null(dots$X_demean)){
skip_fixef = TRUE
X_demean = dots$X_demean
y_demean = dots$y_demean
}
# offset
offset = get("offset.value", env)
isOffset = length(offset) > 1
if(isOffset){
y = y - offset
}
# weights
weights = get("weights.value", env)
isWeight = length(weights) > 1
correct_0w = FALSE
mem.clean = get("mem.clean", env)
demeaned = get("demeaned", env)
warn = get("warn", env)
only.coef = get("only.coef", env)
IN_MULTI = get("IN_MULTI", env)
is_multi_root = get("is_multi_root", env)
if(is_multi_root){
on.exit(release_multi_notes())
assign("is_multi_root", FALSE, env)
}
verbose = get("verbose", env)
if(verbose >= 2) gt("Setup")
}
isFixef = get("isFixef", env)
# Used to solve with the reduced model
xwx = dots$xwx
xwy = dots$xwy
#
# Split ####
#
do_split = get("do_split", env)
if(do_split){
res = multi_split(env, feols)
return(res)
}
#
# Multi fixef ####
#
do_multi_fixef = get("do_multi_fixef", env)
if(do_multi_fixef){
res = multi_fixef(env, feols)
return(res)
}
#
# Multi LHS and RHS ####
#
do_multi_lhs = get("do_multi_lhs", env)
do_multi_rhs = get("do_multi_rhs", env)
if(do_multi_lhs || do_multi_rhs){
assign("do_multi_lhs", FALSE, env)
assign("do_multi_rhs", FALSE, env)
do_iv = get("do_iv", env)
fml = get("fml", env)
lhs_names = get("lhs_names", env)
lhs = y
if(do_multi_lhs){
# We find out which LHS have the same NA patterns => saves a lot of computation
n_lhs = length(lhs)
lhs_group_is_na = list()
lhs_group_id = c()
lhs_group_n_na = c()
for(i in 1:n_lhs){
is_na_current = !is.finite(lhs[[i]])
n_na_current = sum(is_na_current)
if(i == 1){
lhs_group_id = 1
lhs_group_is_na[[1]] = is_na_current
lhs_group_n_na[1] = n_na_current
} else {
qui = which(lhs_group_n_na == n_na_current)
if(length(qui) > 0){
if(n_na_current == 0){
# no need to check the pattern
lhs_group_id[i] = lhs_group_id[qui[1]]
next
}
for(j in qui){
if(all(is_na_current == lhs_group_is_na[[j]])){
lhs_group_id[i] = lhs_group_id[j]
next
}
}
}
# if here => new group because couldn't be matched
id = max(lhs_group_id) + 1
lhs_group_id[i] = id
lhs_group_is_na[[id]] = is_na_current
lhs_group_n_na[id] = n_na_current
}
}
# we make groups: lhs_group[[g]] = indices of the LHS variables in group g
lhs_group = list()
for(i in 1:max(lhs_group_id)){
lhs_group[[i]] = which(lhs_group_id == i)
}
} else if(do_multi_lhs == FALSE){
# Single LHS: degenerate one-group structures so the code below can
# treat the single- and multi-LHS cases uniformly.
lhs_group_is_na = list(FALSE)
lhs_group_n_na = 0
lhs_group = list(1)
lhs = list(lhs) # I really abuse R shallow copy system...
names(lhs) = deparse_long(fml[[2]])
}
if(do_multi_rhs){
# Stepwise RHS (sw/csw and friends); the full and stepwise-part formulas
# were pre-parsed upstream and stored in the environment.
rhs_info_stepwise = get("rhs_info_stepwise", env)
multi_rhs_fml_full = rhs_info_stepwise$fml_all_full
multi_rhs_fml_sw = rhs_info_stepwise$fml_all_sw
multi_rhs_cumul = rhs_info_stepwise$is_cumul
# linear_core: the variables common to all models, split into the parts
# located left/right of the stepwise term in the formula
linear_core = get("linear_core", env)
rhs = get("rhs_sw", env)
# Two schemes:
# - if cumulative: we take advantage of it => both in demeaning and in estimation
# - if regular stepwise => only in demeaning
# => of course this is dependent on the pattern of NAs
#
# NOTE: `length(x) == 1` encodes "absent" (scalar 0 placeholder), hence 0 columns
n_core_left = if(length(linear_core$left) == 1) 0 else ncol(linear_core$left)
n_core_right = if(length(linear_core$right) == 1) 0 else ncol(linear_core$right)
# rnc: running number of columns
rnc = n_core_left
if(rnc == 0){
col_start = integer(0)
} else {
col_start = 1:rnc
}
# NA-pattern grouping accumulators (same scheme as for the LHS), plus
# per-formula column bookkeeping:
#   rhs_n_vars[i]  : number of columns brought in by the i-th sw part
#   rhs_col_id[[i]]: column ids of model i within the stacked X matrix
rhs_group_is_na = list()
rhs_group_id = c()
rhs_group_n_na = c()
rhs_n_vars = c()
rhs_col_id = list()
any_na_rhs = FALSE
# Group the stepwise-RHS parts by identical NA/Inf pattern (for cumulative
# stepwise, the NA patterns accumulate across steps).
for(i in seq_along(multi_rhs_fml_sw)){
# We evaluate the extra data and check the NA pattern
my_fml = multi_rhs_fml_sw[[i]]
if(i == 1 && (multi_rhs_cumul || identical(my_fml[[3]], 1))){
# That case is already in the main linear.mat => no NA
rhs_group_id = 1
rhs_group_is_na[[1]] = FALSE
rhs_group_n_na[1] = 0
rhs_n_vars[1] = 0
rhs[[1]] = 0
if(rnc == 0){
rhs_col_id[[1]] = integer(0)
} else {
rhs_col_id[[1]] = 1:rnc
}
next
}
rhs_current = rhs[[i]]
rhs_n_vars[i] = ncol(rhs_current)
# C++ helper flags NA/Inf cells of the matrix in parallel
info = cpppar_which_na_inf_mat(rhs_current, nthreads)
is_na_current = info$is_na_inf
if(multi_rhs_cumul && any_na_rhs){
# we cumulate the NAs: model i contains all the variables of model i-1,
# so it inherits the NA pattern of the previous step's group
is_na_current = is_na_current | rhs_group_is_na[[rhs_group_id[i - 1]]]
info$any_na_inf = any(is_na_current)
}
n_na_current = 0
if(info$any_na_inf){
any_na_rhs = TRUE
n_na_current = sum(is_na_current)
} else {
# NULL would lead to problems down the road
is_na_current = FALSE
}
if(i == 1){
rhs_group_id = 1
rhs_group_is_na[[1]] = is_na_current
rhs_group_n_na[1] = n_na_current
} else {
# candidate groups: same NA count (cheap filter before comparing patterns)
qui = which(rhs_group_n_na == n_na_current)
if(length(qui) > 0){
if(n_na_current == 0){
# no need to check the pattern
rhs_group_id[i] = rhs_group_id[qui[1]]
next
}
go_next = FALSE
for(j in qui){
if(all(is_na_current == rhs_group_is_na[[j]])){
rhs_group_id[i] = j
go_next = TRUE
break
}
}
if(go_next) next
}
# if here => new group because couldn't be matched
id = max(rhs_group_id) + 1
rhs_group_id[i] = id
rhs_group_is_na[[id]] = is_na_current
rhs_group_n_na[id] = n_na_current
}
}
# we make groups: rhs_group[[g]] = indices of the models in group g
rhs_group = list()
for(i in 1:max(rhs_group_id)){
rhs_group[[i]] = which(rhs_group_id == i)
}
# Finding the right column IDs to select for each model within the stacked
# design matrix built later (core-left columns | sw columns | core-right).
rhs_group_n_vars = rep(0, length(rhs_group)) # To get the total nber of cols per group
for(i in seq_along(multi_rhs_fml_sw)){
if(multi_rhs_cumul){
# cumulative: model i uses all columns up to and including step i
rnc = rnc + rhs_n_vars[i]
if(rnc == 0){
rhs_col_id[[i]] = integer(0)
} else {
rhs_col_id[[i]] = 1:rnc
}
} else {
# regular stepwise: core-left columns + this model's own block, whose
# position is the running offset within its NA group
id = rhs_group_id[i]
rhs_col_id[[i]] = c(col_start, seq(rnc + rhs_group_n_vars[id] + 1, length.out = rhs_n_vars[i]))
rhs_group_n_vars[id] = rhs_group_n_vars[id] + rhs_n_vars[i]
}
}
if(n_core_right > 0){
# We adjust: append the ids of the core-right columns, which sit after
# all the stepwise columns in the stacked matrix
if(multi_rhs_cumul){
for(i in seq_along(multi_rhs_fml_sw)){
id = rhs_group_id[i]
gmax = max(rhs_group[[id]])
rhs_col_id[[i]] = c(rhs_col_id[[i]], n_core_left + sum(rhs_n_vars[1:gmax]) + 1:n_core_right)
}
} else {
for(i in seq_along(multi_rhs_fml_sw)){
id = rhs_group_id[i]
rhs_col_id[[i]] = c(rhs_col_id[[i]], n_core_left + rhs_group_n_vars[id] + 1:n_core_right)
}
}
}
} else if(do_multi_rhs == FALSE){
# Single RHS: degenerate structures mirroring the multi-RHS case so the
# estimation loop below treats both cases uniformly.
multi_rhs_fml_full = list(.xpd(rhs = fml[[3]]))
multi_rhs_cumul = FALSE
rhs_group_is_na = list(FALSE)
rhs_group_n_na = 0
rhs_n_vars = 0
rhs_group = list(1)
rhs = list(0)
rhs_col_id = list(1:NCOL(X))
linear_core = list(left = X, right = 1)
}
# `length(x) > 1` means the component is present (scalar placeholders
# encode absence)
isLinear_right = length(linear_core$right) > 1
isLinear = length(linear_core$left) > 1 || isLinear_right
n_lhs = length(lhs)
n_rhs = length(rhs)
# one result slot per (LHS, RHS) combination
res = vector("list", n_lhs * n_rhs)
rhs_names = sapply(multi_rhs_fml_full, function(x) as.character(x)[[2]])
# Main loop: one pass per (LHS group, RHS group) pair. Within a pair all
# models share the same estimation sample, so NA removal, demeaning and the
# cross-products are computed once and then sliced per model.
for(i in seq_along(lhs_group)){
for(j in seq_along(rhs_group)){
# NA removal: combine the NA patterns of the two groups
no_na = FALSE
if(lhs_group_n_na[i] > 0){
if(rhs_group_n_na[j] > 0){
is_na_current = lhs_group_is_na[[i]] | rhs_group_is_na[[j]]
} else {
is_na_current = lhs_group_is_na[[i]]
}
} else if(rhs_group_n_na[j] > 0){
is_na_current = rhs_group_is_na[[j]]
} else {
no_na = TRUE
}
# Here it depends on whether there are FEs or not, whether it's cumul or not
my_lhs = lhs[lhs_group[[i]]]
if(isLinear){
# start from the core-left matrix, then append the sw blocks
my_rhs = linear_core[1]
if(multi_rhs_cumul){
# cumulative: all sw blocks up to the group's last model
gmax = max(rhs_group[[j]])
my_rhs[1 + (1:gmax)] = rhs[1:gmax]
} else {
# regular stepwise: only the blocks of the models in this group
# (length-1 elements are absent-placeholders => skipped)
for(u in rhs_group[[j]]){
if(length(rhs[[u]]) > 1){
my_rhs[[length(my_rhs) + 1]] = rhs[[u]]
}
}
}
if(isLinear_right){
my_rhs[[length(my_rhs) + 1]] = linear_core$right
}
} else{
# no core-left part: gather only the sw blocks
rhs_len = lengths(rhs)
if(multi_rhs_cumul){
gmax = max(rhs_group[[j]])
my_rhs = rhs[rhs_len > 1 & seq_along(rhs) <= gmax]
} else {
my_rhs = rhs[rhs_len > 1 & seq_along(rhs) %in% rhs_group[[j]]]
}
if(isLinear_right){
my_rhs[[length(my_rhs) + 1]] = linear_core$right
}
}
# drop any remaining scalar placeholders
len_all = lengths(my_rhs)
if(any(len_all == 1)){
my_rhs = my_rhs[len_all > 1]
}
if(!no_na){
# NA removal: subset every LHS vector and RHS matrix, then build a
# child environment restricted to the kept observations
for(u in seq_along(my_lhs)){
my_lhs[[u]] = my_lhs[[u]][!is_na_current]
}
for(u in seq_along(my_rhs)){
if(length(my_rhs[[u]]) > 1) my_rhs[[u]] = my_rhs[[u]][!is_na_current, , drop = FALSE]
}
my_env = reshape_env(env, obs2keep = which(!is_na_current), assign_lhs = FALSE, assign_rhs = FALSE)
} else {
my_env = reshape_env(env)
}
weights = get("weights.value", my_env)
all_varnames = NULL
isLinear_current = TRUE
if(length(my_rhs) == 0){
X_all = 0
isLinear_current = FALSE
} else {
# We try to avoid repeating variables
# => can happen in stepwise estimations (not in csw)
# dict_vars maps a variable name to its column in the de-duplicated X_all
all_varnames = unlist(sapply(my_rhs, colnames))
all_varnames_unik = unique(all_varnames)
# NOTE(review): next line is immediately overwritten below — dead init
all_varnames_done = rep(FALSE, length(all_varnames_unik))
all_varnames_done = all_varnames_unik %in% colnames(my_rhs[[1]])
dict_vars = 1:length(all_varnames_unik)
names(dict_vars) = all_varnames_unik
n_rhs_current = length(my_rhs)
args_cbind = vector("list", n_rhs_current)
args_cbind[[1]] = my_rhs[[1]]
id_rhs = 2
# add from each subsequent block only the columns not seen yet
while(!all(all_varnames_done) && id_rhs <= n_rhs_current){
rhs_current = my_rhs[[id_rhs]]
qui = !colnames(rhs_current) %in% all_varnames_unik[all_varnames_done]
if(any(qui)){
args_cbind[[id_rhs]] = rhs_current[, qui, drop = FALSE]
all_varnames_done = all_varnames_done | all_varnames_unik %in% colnames(rhs_current)
}
id_rhs = id_rhs + 1
}
X_all = do.call("cbind", args_cbind)
}
if(do_iv){
# We need to GET them => they have been modified in my_env
# (reshape_env subset them to the current sample)
iv_lhs = get("iv_lhs", my_env)
iv.mat = get("iv.mat", my_env)
n_inst = ncol(iv.mat)
}
if(isFixef){
# We batch demean: all LHS of the group and all columns of X_all in one
# C++ call, instead of once per model
# NOTE(review): scalar ifelse() — plain if/else would be more idiomatic
n_vars_X = ifelse(is.null(ncol(X_all)), 0, ncol(X_all))
# fixef information
fixef_sizes = get("fixef_sizes", my_env)
fixef_table_vector = get("fixef_table_vector", my_env)
fixef_id_list = get("fixef_id_list", my_env)
slope_flag = get("slope_flag", my_env)
slope_vars = get("slope_variables", my_env)
if(mem.clean) gc()
vars_demean = cpp_demean(my_lhs, X_all, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
X_demean = vars_demean$X_demean
y_demean = vars_demean$y_demean
if(do_iv){
# same batched demeaning for the instruments and the endogenous vars
iv_vars_demean = cpp_demean(iv_lhs, iv.mat, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
iv.mat_demean = iv_vars_demean$X_demean
iv_lhs_demean = iv_vars_demean$y_demean
}
}
# We precompute the solution products once for the whole group; the per-model
# systems below are extracted by slicing rows/columns of these matrices.
if(do_iv){
if(isFixef){
iv_products = cpp_iv_products(X = X_demean, y = y_demean,
Z = iv.mat_demean, u = iv_lhs_demean,
w = weights, nthreads = nthreads)
} else {
if(!is.matrix(X_all)){
X_all = as.matrix(X_all)
}
iv_products = cpp_iv_products(X = X_all, y = my_lhs, Z = iv.mat,
u = iv_lhs, w = weights, nthreads = nthreads)
}
} else {
if(isFixef){
my_products = cpp_sparse_products(X_demean, weights, y_demean, nthreads = nthreads)
} else {
my_products = cpp_sparse_products(X_all, weights, my_lhs, nthreads = nthreads)
}
xwx = my_products$XtX
xwy = my_products$Xty
}
# Per-model estimation: for every LHS of group i and every model of RHS
# group j, slice the precomputed products and call feols() on the reduced
# system. (Loop continues past this excerpt.)
for(ii in seq_along(my_lhs)){
i_lhs = lhs_group[[i]][ii]
for(jj in rhs_group[[j]]){
# linking the unique variables to the variables
# (X_all was de-duplicated, so map the model's column ids through
# the name dictionary)
qui_X = rhs_col_id[[jj]]
if(!is.null(all_varnames)){
qui_X = dict_vars[all_varnames[qui_X]]
}
if(isLinear_current){
my_X = X_all[, qui_X, drop = FALSE]
} else {
my_X = 0
}
my_fml = .xpd(lhs = lhs_names[i_lhs], rhs = multi_rhs_fml_full[[jj]])
current_env = reshape_env(my_env, lhs = my_lhs[[ii]], rhs = my_X, fml_linear = my_fml)
if(do_iv){
if(isLinear_current){
# instrument columns first, then the model's exogenous columns
qui_iv = c(1:n_inst, n_inst + qui_X)
XtX = iv_products$XtX[qui_X, qui_X, drop = FALSE]
Xty = iv_products$Xty[[ii]][qui_X]
} else {
qui_iv = 1:n_inst
XtX = matrix(0, 1, 1)
Xty = matrix(0, 1, 1)
}
my_iv_products = list(XtX = XtX,
Xty = Xty,
ZXtZX = iv_products$ZXtZX[qui_iv, qui_iv, drop = FALSE],
ZXtu = lapply(iv_products$ZXtu, function(x) x[qui_iv]))
if(isFixef){
my_res = feols(env = current_env, iv_products = my_iv_products,
X_demean = X_demean[ , qui_X, drop = FALSE],
y_demean = y_demean[[ii]],
iv.mat_demean = iv.mat_demean, iv_lhs_demean = iv_lhs_demean)
} else {
my_res = feols(env = current_env, iv_products = my_iv_products)
}
} else {
if(isFixef){
my_res = feols(env = current_env, xwx = xwx[qui_X, qui_X, drop = FALSE], xwy = xwy[[ii]][qui_X],
X_demean = X_demean[ , qui_X, drop = FALSE],